[
  {
    "path": ".gitignore",
    "content": "CMakeLists.txt.user\n\nRT_output*.png\n\n.vscode\n.vs\n\nbuild*\ndata/OBJs/*.blend\ndata/OBJs/*.obj\ndata/OBJs/*.mtl\ndata/Skyspheres/*.hdr\ndata/OBJs/Projects/\ndata/Skyspheres/evening_road_01_puresky_8k.hdr\ndata/Skyspheres/AllSkyFree_Sky_EpicGloriousPink_EquirectDebug.jpg\ndata/Skyspheres/AllSkyFree_Sky_EpicGloriousPink_Equirect.jpg\noidn/bin/\noidn/doc/\noidn/lib/\n\nthirdparties/\n\ncontrib/\nhiprt/\n\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"Orochi-Fork\"]\n\tpath = thirdparties/Orochi-Fork\n\turl = https://github.com/TomClabault/Orochi.git\n[submodule \"HIPRT-Fork\"]\n\tpath = thirdparties/HIPRT-Fork\n\turl = https://github.com/TomClabault/HIPRT.git\n[submodule \"ASSIMP-Fork\"]\n\tpath = thirdparties/ASSIMP-Fork\n\turl = https://github.com/TomClabault/assimp.git\n[submodule \"thirdparties/imgui\"]\n\tpath = thirdparties/imgui\n\turl = https://github.com/ocornut/imgui.git\n[submodule \"thirdparties/tracy\"]\n\tpath = thirdparties/tracy\n\turl = https://github.com/wolfpld/tracy.git\n[submodule \"thirdparties/libtinyfiledialogs\"]\n\tpath = thirdparties/libtinyfiledialogs\n\turl = https://github.com/TomClabault/libtinyfiledialogs.git\n[submodule \"thirdparties/clip\"]\n\tpath = thirdparties/clip\n\turl = https://github.com/dacap/clip.git\n"
  },
  {
    "path": "CMakeLists.txt",
    "content": "cmake_minimum_required(VERSION 3.24)\n\nproject(HIPRTPathTracer LANGUAGES CXX)\n\n# To be able to use the ExternalProject_Add() command\ninclude(ExternalProject)\ninclude(FetchContent)\n# To see the progress of FetchContent\nSet(FETCHCONTENT_QUIET FALSE)\n# Policy for what timestamp to use when downloading stuff with FetchContent / ExternalProject / ...\n# NEW sets the timestamps to the extraction time\ncmake_policy(SET CMP0135 NEW)\n\n# If the build type wasn't given on the commandline, we're defaulting to Debug\nif(NOT CMAKE_BUILD_TYPE)\n  set(CMAKE_BUILD_TYPE \"Debug\")\n  message(STATUS \"Build type not specified: Using Debug by default\")\nendif()\n\n# Sets up ASSIMP library CMake variable to prepare the building step\ninclude (cmake/SetupASSIMP.cmake)\n\n# Open Image Denoise binaries\ninclude(cmake/SetupOIDN.cmake)\n\n# Preparing HIPRT\ninclude(cmake/SetupHIPRT.cmake)\n\n# Preparing Orochi\ninclude(cmake/SetupOrochi.cmake)\n\n# Include Tracy for OpenGL profiling\ninclude(cmake/SetupTracy.cmake)\n\n# Clip for copying images to clipboard\ninclude(cmake/Clip.cmake)\n\nset(GLFW_LIB_DIR \"thirdparties/opengl/lib/GLFW\")\nset(GLEW_LIB_DIR \"thirdparties/opengl/lib/GLEW\")\nset(GLEW_BIN_DIR \"thirdparties/opengl/bin/GLEW\")\nset(TRACY_PUBLIC_DIR \"thirdparties/tracy/public\")\n\n# Using CMake here to define C++ macros that will be used to find the directory of the kernels, etc...\n# in the C++ code. This basically avoids hardcoding the path to the kernels in C++ and instead\n# use the more flexible approach of defining it in the CMake\nadd_compile_definitions(DEVICE_KERNELS_DIRECTORY=\"../src/Device/kernels\")\nadd_compile_definitions(DEVICE_INCLUDES_DIRECTORY=\"../src/\") # This gives access to Device/ and HostDeviceCommon/\nadd_compile_definitions(OROCHI_INCLUDES_DIRECTORY=\"${OROCHI_SOURCES_DIR}/..\") # This gives access to <Orochi/Orochi.h> in the kernels\nadd_compile_definitions(GLSL_SHADERS_DIRECTORY=\"../src/Shaders\")\nadd_compile_definitions(DATA_DIRECTORY=\"${CMAKE_SOURCE_DIR}/data\")\nadd_compile_definitions(BRDFS_DATA_DIRECTORY=\"${CMAKE_SOURCE_DIR}/data/BRDFsData\")\n#add_compile_definitions(TRACY_ENABLE=\"1\")\n\nlink_directories(${CMAKE_SOURCE_DIR}/${GLFW_LIB_DIR})\nlink_directories(${CMAKE_SOURCE_DIR}/${GLEW_LIB_DIR})\n\nfile(GLOB_RECURSE SOURCE_FILES src/*.cpp src/*.h)\nfile(GLOB_RECURSE OPENGL_HEADERS thirdparties/opengl/include/*.h)\nfile(GLOB_RECURSE STBI_HEADERS thirdparties/stbi/*.h)\n# Selecting only what we need from the whole ImGui submodule\nfile(GLOB_RECURSE IMGUI_FILES thirdparties/imgui/imgui.h\n\tthirdparties/imgui/imgui.cpp\n\tthirdparties/imgui/imgui_demo.cpp\n\tthirdparties/imgui/imgui_draw.cpp\n\tthirdparties/imgui/imgui_tables.cpp\n\tthirdparties/imgui/imgui_widgets.cpp\n\tthirdparties/imgui/backends/imgui_impl_glfw.cpp\n\tthirdparties/imgui/backends/imgui_impl_opengl3.cpp\n\tthirdparties/imgui/misc/cpp/imgui_stdlib.cpp)\n\nfile(GLOB_RECURSE DEVICE_SOURCES src/Device/*.h)\nfile(GLOB_RECURSE GLSL_SHADERS src/Shaders/*.frag src/Shaders/*.vert)\nfile(GLOB_RECURSE HIPRT_HEADERS ${HIPRT_HEADERS_DIR}/*.h)\nfile(GLOB_RECURSE OROCHI_SOURCES_AND_HEADERS ${OROCHI_SOURCES_DIR}/*.h ${OROCHI_SOURCES_DIR}/*.cpp)\nfile(GLOB_RECURSE CUEW_SOURCES_AND_HEADERS ${CUEW_SOURCES_DIR}/*.h ${CUEW_SOURCES_DIR}/*.cpp)\nfile(GLOB_RECURSE HIPEW_SOURCES_AND_HEADERS ${HIPEW_SOURCES_DIR}/*.h ${HIPEW_SOURCES_DIR}/*.cpp)\nfile(GLOB_RECURSE TINYFILEDIALOGS_SOURCE_AND_HEADERS thirdparties/libtinyfiledialogs/tinyfiledialogs.h thirdparties/libtinyfiledialogs/tinyfiledialogs.cpp)\nfile(GLOB_RECURSE NVIDIA_FLIP_HEADERS thirdparties/nvidia-FLIP/*.h)\n\nadd_executable(HIPRTPathTracer\n\t${SOURCE_FILES}\n\n\t${OPENGL_HEADERS}\n\t${STBI_HEADERS}\n\t${IMGUI_FILES}\n\t${ASSIMP_HEADERS}\n\n\t${DEVICE_SOURCES}\n\t${GLSL_SHADERS}\n\t${HIPRT_HEADERS}\n\t${OROCHI_SOURCES_AND_HEADERS}\n\t${CUEW_SOURCES_AND_HEADERS}\n\t${HIPEW_SOURCES_AND_HEADERS}\n\t${TINYFILEDIALOGS_SOURCE_AND_HEADERS}\n\t${NVIDIA_FLIP_HEADERS}\n)\n\nset_property(TARGET HIPRTPathTracer PROPERTY CXX_STANDARD 20)\n\nfind_package(OpenMP REQUIRED)\nfind_package(OpenGL REQUIRED)\nfind_package(OpenImageDenoise REQUIRED HINTS ${oidnbinaries_SOURCE_DIR}) # HINTS to indicate a folder to search for the library in\n\nif (WIN32)\n\t# \"version\" is a library from the Windows SDK\n\ttarget_link_libraries(HIPRTPathTracer PRIVATE OpenMP::OpenMP_CXX assimp OpenImageDenoise ${OPENGL_LIBRARY} glfw3 glew32 hiprt02004 TracyClient clip version)\nelseif(UNIX)\n\tfind_package(GLEW REQUIRED)\n\ttarget_link_libraries(HIPRTPathTracer PRIVATE OpenMP::OpenMP_CXX assimp OpenImageDenoise ${OPENGL_LIBRARY} glfw GLEW::GLEW hiprt02004 clip TracyClient)\nendif()\n\ntarget_include_directories(HIPRTPathTracer PRIVATE \"src/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/opengl/include\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/stbi/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/libtinyfiledialogs/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/glm/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/imgui/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/imgui/backends\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/tinyexr/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/nvidia-FLIP/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \"thirdparties/clip/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE ${HIPRT_HEADERS_DIR}/..)\ntarget_include_directories(HIPRTPathTracer PRIVATE ${OROCHI_SOURCES_DIR}/..)\ntarget_include_directories(HIPRTPathTracer PRIVATE \"${EXTERNAL_ASSIMP_INSTALL_LOCATION}/include/\")\ntarget_include_directories(HIPRTPathTracer PRIVATE \".\")\ntarget_include_directories(HIPRTPathTracer PRIVATE ${TRACY_PUBLIC_DIR})\n\n# Auto setup of Orochi for NVIDIA by including their cmake file\ninclude(${HIPRT_SUBMODULE_DIR}/contrib/Orochi/Orochi/enable_cuew.cmake)\n\nif (WIN32)\n\tmessage(STATUS \"Copying OpenImageDenoise binaries...\")\n\tfile(GLOB OIDN_BINARIES ${oidnbinaries_SOURCE_DIR}/bin/*.dll)\n\tfile(COPY ${OIDN_BINARIES} DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/)\n\n\t# For copying HIPRT's DLL\n\tget_target_property(HIPRT_DLL_NAME hiprt02004 OUTPUT_NAME)\n\n\t#if (${CMAKE_BUILD_TYPE} STREQUAL \"Debug\" AND MSVC_IDE)\n\t\t# Adding the 'd' suffix that MSVC adds to debug libraries file names\n\t\tset(HIPRT_DLL_NAME_DEBUG ${HIPRT_DLL_NAME}d.dll)\n\t#endif()\n\t# Appending .dll extension\n\tset(HIPRT_DLL_NAME ${HIPRT_DLL_NAME}.dll)\n\n\tadd_custom_command(OUTPUT\n        ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME}\n    COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:hiprt02004>\n        ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME}\n    DEPENDS hiprt02004)\n\n\tadd_custom_command(OUTPUT\n        ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME_DEBUG}\n    COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:hiprt02004>\n        ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME_DEBUG}\n    DEPENDS hiprt02004)\n\n\t# Create target which consume the command via DEPENDS.\n\tadd_custom_target(hiprtCopyDLL ALL DEPENDS ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME} ${CMAKE_BINARY_DIR}/${HIPRT_DLL_NAME_DEBUG})\n\tadd_dependencies(HIPRTPathTracer hiprtCopyDLL)\n\n\tmessage(STATUS \"Copying Glew binaries...\")\n\tfile(COPY ${CMAKE_SOURCE_DIR}/${GLEW_BIN_DIR}/glew32.dll DESTINATION ${CMAKE_CURRENT_BINARY_DIR})\nendif()\n\n\n\nif(MSVC_IDE)\n\t# Enabling parallel compilation on MSVC which isn't enabled by default\n\tif(MSVC)\n\t\tadd_definitions(/MP)\n\n\t\tif (${CMAKE_BUILD_TYPE} STREQUAL \"RelWithDebInfo\")\n\t\t\t# In RelWithDebInfo, we're disabling all optimizations\n\t\t\t# for easier debugging:\n\t\t\t#\t- RelWithDebInfo is faster than debug so we want to use\n\t\t\t#\tthat when debugging line by line\n\t\t\t#\t\n\t\t\t#\t- But the optimizations that are enabled by default in\n\t\t\t#\tRelWithDebInfo still mess up with MSVC debugger and the\n\t\t\t#\tdebugger jumps everywhere, variables are optimized away etc...\n\t\t\t#\tEven if we're supposed to include debug infos in the compilation...\n\t\t\t#\tSo we're just disabling optimizations then\n\n\t\t\t# No optimizations\n\t\t\tadd_definitions(/Od)\n\n\t\t\t# No inlining of functions\n\t\t\tadd_definitions(/Ob0)\n\t\tendif()\n\tendif()\n\n\t# Macro to preserve nice beautiful source files hierarchy in Visual Studio\n\tmacro(GroupSources curdir)\n\t\tfile(GLOB children RELATIVE ${PROJECT_SOURCE_DIR}/${curdir} ${PROJECT_SOURCE_DIR}/${curdir}/*)\n\n\t\tforeach(child ${children})\n\t\t\tif(IS_DIRECTORY ${PROJECT_SOURCE_DIR}/${curdir}/${child})\n\t\t\t\tGroupSources(${curdir}/${child})\n\t\t\telse()\n\t\t\t\tstring(REPLACE \"/\" \"\\\\\" groupname ${curdir})\n\t\t\t\tstring(REPLACE \"src\" \"Sources\" groupname ${groupname})\n\t\t\t\tsource_group(${groupname} FILES ${PROJECT_SOURCE_DIR}/${curdir}/${child})\n\t\t\tendif()\n\t\tendforeach()\n\tendmacro()\n\n\t# Run macro\n\tGroupSources(src)\n\t\n\t# Creating a Visual Studio folder for the targets we don't care about so we have\n\t# a way to have our IDE look clean\n\tset_property(GLOBAL PROPERTY USE_FOLDERS ON)\n\tset_target_properties(\n\t\tassimp uninstall zlibstatic UpdateAssimpLibsDebugSymbolsAndDLLs # ASSIMP Targets\n\t\thiprt02004 hiprtCopyDLL # HIPRT Targets\n\t\tTracyClient # Tracy\n\thello_world copy clip_user_format_tests clip_text_tests clip_image_tests clip int_format paste put_image show_image # Clip\n\t\tPROPERTIES FOLDER ExternalTargets)\nendif()\n"
  },
  {
    "path": "COPYING",
    "content": "                    GNU GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n                            Preamble\n\n  The GNU General Public License is a free, copyleft license for\nsoftware and other kinds of works.\n\n  The licenses for most software and other practical works are designed\nto take away your freedom to share and change the works.  By contrast,\nthe GNU General Public License is intended to guarantee your freedom to\nshare and change all versions of a program--to make sure it remains free\nsoftware for all its users.  We, the Free Software Foundation, use the\nGNU General Public License for most of our software; it applies also to\nany other work released this way by its authors.  You can apply it to\nyour programs, too.\n\n  When we speak of free software, we are referring to freedom, not\nprice.  Our General Public Licenses are designed to make sure that you\nhave the freedom to distribute copies of free software (and charge for\nthem if you wish), that you receive source code or can get it if you\nwant it, that you can change the software or use pieces of it in new\nfree programs, and that you know you can do these things.\n\n  To protect your rights, we need to prevent others from denying you\nthese rights or asking you to surrender the rights.  Therefore, you have\ncertain responsibilities if you distribute copies of the software, or if\nyou modify it: responsibilities to respect the freedom of others.\n\n  For example, if you distribute copies of such a program, whether\ngratis or for a fee, you must pass on to the recipients the same\nfreedoms that you received.  You must make sure that they, too, receive\nor can get the source code.  
And you must show them these terms so they\nknow their rights.\n\n  Developers that use the GNU GPL protect your rights with two steps:\n(1) assert copyright on the software, and (2) offer you this License\ngiving you legal permission to copy, distribute and/or modify it.\n\n  For the developers' and authors' protection, the GPL clearly explains\nthat there is no warranty for this free software.  For both users' and\nauthors' sake, the GPL requires that modified versions be marked as\nchanged, so that their problems will not be attributed erroneously to\nauthors of previous versions.\n\n  Some devices are designed to deny users access to install or run\nmodified versions of the software inside them, although the manufacturer\ncan do so.  This is fundamentally incompatible with the aim of\nprotecting users' freedom to change the software.  The systematic\npattern of such abuse occurs in the area of products for individuals to\nuse, which is precisely where it is most unacceptable.  Therefore, we\nhave designed this version of the GPL to prohibit the practice for those\nproducts.  If such problems arise substantially in other domains, we\nstand ready to extend this provision to those domains in future versions\nof the GPL, as needed to protect the freedom of users.\n\n  Finally, every program is threatened constantly by software patents.\nStates should not allow patents to restrict development and use of\nsoftware on general-purpose computers, but in those that do, we wish to\navoid the special danger that patents applied to a free program could\nmake it effectively proprietary.  To prevent this, the GPL assures that\npatents cannot be used to render the program non-free.\n\n  The precise terms and conditions for copying, distribution and\nmodification follow.\n\n                       TERMS AND CONDITIONS\n\n  0. 
Definitions.\n\n  \"This License\" refers to version 3 of the GNU General Public License.\n\n  \"Copyright\" also means copyright-like laws that apply to other kinds of\nworks, such as semiconductor masks.\n\n  \"The Program\" refers to any copyrightable work licensed under this\nLicense.  Each licensee is addressed as \"you\".  \"Licensees\" and\n\"recipients\" may be individuals or organizations.\n\n  To \"modify\" a work means to copy from or adapt all or part of the work\nin a fashion requiring copyright permission, other than the making of an\nexact copy.  The resulting work is called a \"modified version\" of the\nearlier work or a work \"based on\" the earlier work.\n\n  A \"covered work\" means either the unmodified Program or a work based\non the Program.\n\n  To \"propagate\" a work means to do anything with it that, without\npermission, would make you directly or secondarily liable for\ninfringement under applicable copyright law, except executing it on a\ncomputer or modifying a private copy.  Propagation includes copying,\ndistribution (with or without modification), making available to the\npublic, and in some countries other activities as well.\n\n  To \"convey\" a work means any kind of propagation that enables other\nparties to make or receive copies.  Mere interaction with a user through\na computer network, with no transfer of a copy, is not conveying.\n\n  An interactive user interface displays \"Appropriate Legal Notices\"\nto the extent that it includes a convenient and prominently visible\nfeature that (1) displays an appropriate copyright notice, and (2)\ntells the user that there is no warranty for the work (except to the\nextent that warranties are provided), that licensees may convey the\nwork under this License, and how to view a copy of this License.  If\nthe interface presents a list of user commands or options, such as a\nmenu, a prominent item in the list meets this criterion.\n\n  1. 
Source Code.\n\n  The \"source code\" for a work means the preferred form of the work\nfor making modifications to it.  \"Object code\" means any non-source\nform of a work.\n\n  A \"Standard Interface\" means an interface that either is an official\nstandard defined by a recognized standards body, or, in the case of\ninterfaces specified for a particular programming language, one that\nis widely used among developers working in that language.\n\n  The \"System Libraries\" of an executable work include anything, other\nthan the work as a whole, that (a) is included in the normal form of\npackaging a Major Component, but which is not part of that Major\nComponent, and (b) serves only to enable use of the work with that\nMajor Component, or to implement a Standard Interface for which an\nimplementation is available to the public in source code form.  A\n\"Major Component\", in this context, means a major essential component\n(kernel, window system, and so on) of the specific operating system\n(if any) on which the executable work runs, or a compiler used to\nproduce the work, or an object code interpreter used to run it.\n\n  The \"Corresponding Source\" for a work in object code form means all\nthe source code needed to generate, install, and (for an executable\nwork) run the object code and to modify the work, including scripts to\ncontrol those activities.  However, it does not include the work's\nSystem Libraries, or general-purpose tools or generally available free\nprograms which are used unmodified in performing those activities but\nwhich are not part of the work.  
For example, Corresponding Source\nincludes interface definition files associated with source files for\nthe work, and the source code for shared libraries and dynamically\nlinked subprograms that the work is specifically designed to require,\nsuch as by intimate data communication or control flow between those\nsubprograms and other parts of the work.\n\n  The Corresponding Source need not include anything that users\ncan regenerate automatically from other parts of the Corresponding\nSource.\n\n  The Corresponding Source for a work in source code form is that\nsame work.\n\n  2. Basic Permissions.\n\n  All rights granted under this License are granted for the term of\ncopyright on the Program, and are irrevocable provided the stated\nconditions are met.  This License explicitly affirms your unlimited\npermission to run the unmodified Program.  The output from running a\ncovered work is covered by this License only if the output, given its\ncontent, constitutes a covered work.  This License acknowledges your\nrights of fair use or other equivalent, as provided by copyright law.\n\n  You may make, run and propagate covered works that you do not\nconvey, without conditions so long as your license otherwise remains\nin force.  You may convey covered works to others for the sole purpose\nof having them make modifications exclusively for you, or provide you\nwith facilities for running those works, provided that you comply with\nthe terms of this License in conveying all material for which you do\nnot control copyright.  Those thus making or running the covered works\nfor you must do so exclusively on your behalf, under your direction\nand control, on terms that prohibit them from making any copies of\nyour copyrighted material outside their relationship with you.\n\n  Conveying under any other circumstances is permitted solely under\nthe conditions stated below.  Sublicensing is not allowed; section 10\nmakes it unnecessary.\n\n  3. 
Protecting Users' Legal Rights From Anti-Circumvention Law.\n\n  No covered work shall be deemed part of an effective technological\nmeasure under any applicable law fulfilling obligations under article\n11 of the WIPO copyright treaty adopted on 20 December 1996, or\nsimilar laws prohibiting or restricting circumvention of such\nmeasures.\n\n  When you convey a covered work, you waive any legal power to forbid\ncircumvention of technological measures to the extent such circumvention\nis effected by exercising rights under this License with respect to\nthe covered work, and you disclaim any intention to limit operation or\nmodification of the work as a means of enforcing, against the work's\nusers, your or third parties' legal rights to forbid circumvention of\ntechnological measures.\n\n  4. Conveying Verbatim Copies.\n\n  You may convey verbatim copies of the Program's source code as you\nreceive it, in any medium, provided that you conspicuously and\nappropriately publish on each copy an appropriate copyright notice;\nkeep intact all notices stating that this License and any\nnon-permissive terms added in accord with section 7 apply to the code;\nkeep intact all notices of the absence of any warranty; and give all\nrecipients a copy of this License along with the Program.\n\n  You may charge any price or no price for each copy that you convey,\nand you may offer support or warranty protection for a fee.\n\n  5. Conveying Modified Source Versions.\n\n  You may convey a work based on the Program, or the modifications to\nproduce it from the Program, in the form of source code under the\nterms of section 4, provided that you also meet all of these conditions:\n\n    a) The work must carry prominent notices stating that you modified\n    it, and giving a relevant date.\n\n    b) The work must carry prominent notices stating that it is\n    released under this License and any conditions added under section\n    7.  
This requirement modifies the requirement in section 4 to\n    \"keep intact all notices\".\n\n    c) You must license the entire work, as a whole, under this\n    License to anyone who comes into possession of a copy.  This\n    License will therefore apply, along with any applicable section 7\n    additional terms, to the whole of the work, and all its parts,\n    regardless of how they are packaged.  This License gives no\n    permission to license the work in any other way, but it does not\n    invalidate such permission if you have separately received it.\n\n    d) If the work has interactive user interfaces, each must display\n    Appropriate Legal Notices; however, if the Program has interactive\n    interfaces that do not display Appropriate Legal Notices, your\n    work need not make them do so.\n\n  A compilation of a covered work with other separate and independent\nworks, which are not by their nature extensions of the covered work,\nand which are not combined with it such as to form a larger program,\nin or on a volume of a storage or distribution medium, is called an\n\"aggregate\" if the compilation and its resulting copyright are not\nused to limit the access or legal rights of the compilation's users\nbeyond what the individual works permit.  Inclusion of a covered work\nin an aggregate does not cause this License to apply to the other\nparts of the aggregate.\n\n  6. 
Conveying Non-Source Forms.\n\n  You may convey a covered work in object code form under the terms\nof sections 4 and 5, provided that you also convey the\nmachine-readable Corresponding Source under the terms of this License,\nin one of these ways:\n\n    a) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by the\n    Corresponding Source fixed on a durable physical medium\n    customarily used for software interchange.\n\n    b) Convey the object code in, or embodied in, a physical product\n    (including a physical distribution medium), accompanied by a\n    written offer, valid for at least three years and valid for as\n    long as you offer spare parts or customer support for that product\n    model, to give anyone who possesses the object code either (1) a\n    copy of the Corresponding Source for all the software in the\n    product that is covered by this License, on a durable physical\n    medium customarily used for software interchange, for a price no\n    more than your reasonable cost of physically performing this\n    conveying of source, or (2) access to copy the\n    Corresponding Source from a network server at no charge.\n\n    c) Convey individual copies of the object code with a copy of the\n    written offer to provide the Corresponding Source.  This\n    alternative is allowed only occasionally and noncommercially, and\n    only if you received the object code with such an offer, in accord\n    with subsection 6b.\n\n    d) Convey the object code by offering access from a designated\n    place (gratis or for a charge), and offer equivalent access to the\n    Corresponding Source in the same way through the same place at no\n    further charge.  You need not require recipients to copy the\n    Corresponding Source along with the object code.  
If the place to\n    copy the object code is a network server, the Corresponding Source\n    may be on a different server (operated by you or a third party)\n    that supports equivalent copying facilities, provided you maintain\n    clear directions next to the object code saying where to find the\n    Corresponding Source.  Regardless of what server hosts the\n    Corresponding Source, you remain obligated to ensure that it is\n    available for as long as needed to satisfy these requirements.\n\n    e) Convey the object code using peer-to-peer transmission, provided\n    you inform other peers where the object code and Corresponding\n    Source of the work are being offered to the general public at no\n    charge under subsection 6d.\n\n  A separable portion of the object code, whose source code is excluded\nfrom the Corresponding Source as a System Library, need not be\nincluded in conveying the object code work.\n\n  A \"User Product\" is either (1) a \"consumer product\", which means any\ntangible personal property which is normally used for personal, family,\nor household purposes, or (2) anything designed or sold for incorporation\ninto a dwelling.  In determining whether a product is a consumer product,\ndoubtful cases shall be resolved in favor of coverage.  For a particular\nproduct received by a particular user, \"normally used\" refers to a\ntypical or common use of that class of product, regardless of the status\nof the particular user or of the way in which the particular user\nactually uses, or expects or is expected to use, the product.  
A product\nis a consumer product regardless of whether the product has substantial\ncommercial, industrial or non-consumer uses, unless such uses represent\nthe only significant mode of use of the product.\n\n  \"Installation Information\" for a User Product means any methods,\nprocedures, authorization keys, or other information required to install\nand execute modified versions of a covered work in that User Product from\na modified version of its Corresponding Source.  The information must\nsuffice to ensure that the continued functioning of the modified object\ncode is in no case prevented or interfered with solely because\nmodification has been made.\n\n  If you convey an object code work under this section in, or with, or\nspecifically for use in, a User Product, and the conveying occurs as\npart of a transaction in which the right of possession and use of the\nUser Product is transferred to the recipient in perpetuity or for a\nfixed term (regardless of how the transaction is characterized), the\nCorresponding Source conveyed under this section must be accompanied\nby the Installation Information.  But this requirement does not apply\nif neither you nor any third party retains the ability to install\nmodified object code on the User Product (for example, the work has\nbeen installed in ROM).\n\n  The requirement to provide Installation Information does not include a\nrequirement to continue to provide support service, warranty, or updates\nfor a work that has been modified or installed by the recipient, or for\nthe User Product in which it has been modified or installed.  
Access to a\nnetwork may be denied when the modification itself materially and\nadversely affects the operation of the network or violates the rules and\nprotocols for communication across the network.\n\n  Corresponding Source conveyed, and Installation Information provided,\nin accord with this section must be in a format that is publicly\ndocumented (and with an implementation available to the public in\nsource code form), and must require no special password or key for\nunpacking, reading or copying.\n\n  7. Additional Terms.\n\n  \"Additional permissions\" are terms that supplement the terms of this\nLicense by making exceptions from one or more of its conditions.\nAdditional permissions that are applicable to the entire Program shall\nbe treated as though they were included in this License, to the extent\nthat they are valid under applicable law.  If additional permissions\napply only to part of the Program, that part may be used separately\nunder those permissions, but the entire Program remains governed by\nthis License without regard to the additional permissions.\n\n  When you convey a copy of a covered work, you may at your option\nremove any additional permissions from that copy, or from any part of\nit.  (Additional permissions may be written to require their own\nremoval in certain cases when you modify the work.)  
You may place\nadditional permissions on material, added by you to a covered work,\nfor which you have or can give appropriate copyright permission.\n\n  Notwithstanding any other provision of this License, for material you\nadd to a covered work, you may (if authorized by the copyright holders of\nthat material) supplement the terms of this License with terms:\n\n    a) Disclaiming warranty or limiting liability differently from the\n    terms of sections 15 and 16 of this License; or\n\n    b) Requiring preservation of specified reasonable legal notices or\n    author attributions in that material or in the Appropriate Legal\n    Notices displayed by works containing it; or\n\n    c) Prohibiting misrepresentation of the origin of that material, or\n    requiring that modified versions of such material be marked in\n    reasonable ways as different from the original version; or\n\n    d) Limiting the use for publicity purposes of names of licensors or\n    authors of the material; or\n\n    e) Declining to grant rights under trademark law for use of some\n    trade names, trademarks, or service marks; or\n\n    f) Requiring indemnification of licensors and authors of that\n    material by anyone who conveys the material (or modified versions of\n    it) with contractual assumptions of liability to the recipient, for\n    any liability that these contractual assumptions directly impose on\n    those licensors and authors.\n\n  All other non-permissive additional terms are considered \"further\nrestrictions\" within the meaning of section 10.  If the Program as you\nreceived it, or any part of it, contains a notice stating that it is\ngoverned by this License along with a term that is a further\nrestriction, you may remove that term.  
If a license document contains\na further restriction but permits relicensing or conveying under this\nLicense, you may add to a covered work material governed by the terms\nof that license document, provided that the further restriction does\nnot survive such relicensing or conveying.\n\n  If you add terms to a covered work in accord with this section, you\nmust place, in the relevant source files, a statement of the\nadditional terms that apply to those files, or a notice indicating\nwhere to find the applicable terms.\n\n  Additional terms, permissive or non-permissive, may be stated in the\nform of a separately written license, or stated as exceptions;\nthe above requirements apply either way.\n\n  8. Termination.\n\n  You may not propagate or modify a covered work except as expressly\nprovided under this License.  Any attempt otherwise to propagate or\nmodify it is void, and will automatically terminate your rights under\nthis License (including any patent licenses granted under the third\nparagraph of section 11).\n\n  However, if you cease all violation of this License, then your\nlicense from a particular copyright holder is reinstated (a)\nprovisionally, unless and until the copyright holder explicitly and\nfinally terminates your license, and (b) permanently, if the copyright\nholder fails to notify you of the violation by some reasonable means\nprior to 60 days after the cessation.\n\n  Moreover, your license from a particular copyright holder is\nreinstated permanently if the copyright holder notifies you of the\nviolation by some reasonable means, this is the first time you have\nreceived notice of violation of this License (for any work) from that\ncopyright holder, and you cure the violation prior to 30 days after\nyour receipt of the notice.\n\n  Termination of your rights under this section does not terminate the\nlicenses of parties who have received copies or rights from you under\nthis License.  
If your rights have been terminated and not permanently\nreinstated, you do not qualify to receive new licenses for the same\nmaterial under section 10.\n\n  9. Acceptance Not Required for Having Copies.\n\n  You are not required to accept this License in order to receive or\nrun a copy of the Program.  Ancillary propagation of a covered work\noccurring solely as a consequence of using peer-to-peer transmission\nto receive a copy likewise does not require acceptance.  However,\nnothing other than this License grants you permission to propagate or\nmodify any covered work.  These actions infringe copyright if you do\nnot accept this License.  Therefore, by modifying or propagating a\ncovered work, you indicate your acceptance of this License to do so.\n\n  10. Automatic Licensing of Downstream Recipients.\n\n  Each time you convey a covered work, the recipient automatically\nreceives a license from the original licensors, to run, modify and\npropagate that work, subject to this License.  You are not responsible\nfor enforcing compliance by third parties with this License.\n\n  An \"entity transaction\" is a transaction transferring control of an\norganization, or substantially all assets of one, or subdividing an\norganization, or merging organizations.  If propagation of a covered\nwork results from an entity transaction, each party to that\ntransaction who receives a copy of the work also receives whatever\nlicenses to the work the party's predecessor in interest had or could\ngive under the previous paragraph, plus a right to possession of the\nCorresponding Source of the work from the predecessor in interest, if\nthe predecessor has it or can get it with reasonable efforts.\n\n  You may not impose any further restrictions on the exercise of the\nrights granted or affirmed under this License.  
For example, you may\nnot impose a license fee, royalty, or other charge for exercise of\nrights granted under this License, and you may not initiate litigation\n(including a cross-claim or counterclaim in a lawsuit) alleging that\nany patent claim is infringed by making, using, selling, offering for\nsale, or importing the Program or any portion of it.\n\n  11. Patents.\n\n  A \"contributor\" is a copyright holder who authorizes use under this\nLicense of the Program or a work on which the Program is based.  The\nwork thus licensed is called the contributor's \"contributor version\".\n\n  A contributor's \"essential patent claims\" are all patent claims\nowned or controlled by the contributor, whether already acquired or\nhereafter acquired, that would be infringed by some manner, permitted\nby this License, of making, using, or selling its contributor version,\nbut do not include claims that would be infringed only as a\nconsequence of further modification of the contributor version.  For\npurposes of this definition, \"control\" includes the right to grant\npatent sublicenses in a manner consistent with the requirements of\nthis License.\n\n  Each contributor grants you a non-exclusive, worldwide, royalty-free\npatent license under the contributor's essential patent claims, to\nmake, use, sell, offer for sale, import and otherwise run, modify and\npropagate the contents of its contributor version.\n\n  In the following three paragraphs, a \"patent license\" is any express\nagreement or commitment, however denominated, not to enforce a patent\n(such as an express permission to practice a patent or covenant not to\nsue for patent infringement).  
To \"grant\" such a patent license to a\nparty means to make such an agreement or commitment not to enforce a\npatent against the party.\n\n  If you convey a covered work, knowingly relying on a patent license,\nand the Corresponding Source of the work is not available for anyone\nto copy, free of charge and under the terms of this License, through a\npublicly available network server or other readily accessible means,\nthen you must either (1) cause the Corresponding Source to be so\navailable, or (2) arrange to deprive yourself of the benefit of the\npatent license for this particular work, or (3) arrange, in a manner\nconsistent with the requirements of this License, to extend the patent\nlicense to downstream recipients.  \"Knowingly relying\" means you have\nactual knowledge that, but for the patent license, your conveying the\ncovered work in a country, or your recipient's use of the covered work\nin a country, would infringe one or more identifiable patents in that\ncountry that you have reason to believe are valid.\n\n  If, pursuant to or in connection with a single transaction or\narrangement, you convey, or propagate by procuring conveyance of, a\ncovered work, and grant a patent license to some of the parties\nreceiving the covered work authorizing them to use, propagate, modify\nor convey a specific copy of the covered work, then the patent license\nyou grant is automatically extended to all recipients of the covered\nwork and works based on it.\n\n  A patent license is \"discriminatory\" if it does not include within\nthe scope of its coverage, prohibits the exercise of, or is\nconditioned on the non-exercise of one or more of the rights that are\nspecifically granted under this License.  
You may not convey a covered\nwork if you are a party to an arrangement with a third party that is\nin the business of distributing software, under which you make payment\nto the third party based on the extent of your activity of conveying\nthe work, and under which the third party grants, to any of the\nparties who would receive the covered work from you, a discriminatory\npatent license (a) in connection with copies of the covered work\nconveyed by you (or copies made from those copies), or (b) primarily\nfor and in connection with specific products or compilations that\ncontain the covered work, unless you entered into that arrangement,\nor that patent license was granted, prior to 28 March 2007.\n\n  Nothing in this License shall be construed as excluding or limiting\nany implied license or other defenses to infringement that may\notherwise be available to you under applicable patent law.\n\n  12. No Surrender of Others' Freedom.\n\n  If conditions are imposed on you (whether by court order, agreement or\notherwise) that contradict the conditions of this License, they do not\nexcuse you from the conditions of this License.  If you cannot convey a\ncovered work so as to satisfy simultaneously your obligations under this\nLicense and any other pertinent obligations, then as a consequence you may\nnot convey it at all.  For example, if you agree to terms that obligate you\nto collect a royalty for further conveying from those to whom you convey\nthe Program, the only way you could satisfy both those terms and this\nLicense would be to refrain entirely from conveying the Program.\n\n  13. Use with the GNU Affero General Public License.\n\n  Notwithstanding any other provision of this License, you have\npermission to link or combine any covered work with a work licensed\nunder version 3 of the GNU Affero General Public License into a single\ncombined work, and to convey the resulting work.  
The terms of this\nLicense will continue to apply to the part which is the covered work,\nbut the special requirements of the GNU Affero General Public License,\nsection 13, concerning interaction through a network will apply to the\ncombination as such.\n\n  14. Revised Versions of this License.\n\n  The Free Software Foundation may publish revised and/or new versions of\nthe GNU General Public License from time to time.  Such new versions will\nbe similar in spirit to the present version, but may differ in detail to\naddress new problems or concerns.\n\n  Each version is given a distinguishing version number.  If the\nProgram specifies that a certain numbered version of the GNU General\nPublic License \"or any later version\" applies to it, you have the\noption of following the terms and conditions either of that numbered\nversion or of any later version published by the Free Software\nFoundation.  If the Program does not specify a version number of the\nGNU General Public License, you may choose any version ever published\nby the Free Software Foundation.\n\n  If the Program specifies that a proxy can decide which future\nversions of the GNU General Public License can be used, that proxy's\npublic statement of acceptance of a version permanently authorizes you\nto choose that version for the Program.\n\n  Later license versions may give you additional or different\npermissions.  However, no additional obligations are imposed on any\nauthor or copyright holder as a result of your choosing to follow a\nlater version.\n\n  15. Disclaimer of Warranty.\n\n  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY\nAPPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT\nHOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM \"AS IS\" WITHOUT WARRANTY\nOF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,\nTHE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\nPURPOSE.  
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM\nIS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF\nALL NECESSARY SERVICING, REPAIR OR CORRECTION.\n\n  16. Limitation of Liability.\n\n  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING\nWILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS\nTHE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY\nGENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE\nUSE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF\nDATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD\nPARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),\nEVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF\nSUCH DAMAGES.\n\n  17. Interpretation of Sections 15 and 16.\n\n  If the disclaimer of warranty and limitation of liability provided\nabove cannot be given local legal effect according to their terms,\nreviewing courts shall apply local law that most closely approximates\nan absolute waiver of all civil liability in connection with the\nProgram, unless a warranty or assumption of liability accompanies a\ncopy of the Program in return for a fee.\n\n                     END OF TERMS AND CONDITIONS\n\n            How to Apply These Terms to Your New Programs\n\n  If you develop a new program, and you want it to be of the greatest\npossible use to the public, the best way to achieve this is to make it\nfree software which everyone can redistribute and change under these terms.\n\n  To do so, attach the following notices to the program.  
It is safest\nto attach them to the start of each source file to most effectively\nstate the exclusion of warranty; and each file should have at least\nthe \"copyright\" line and a pointer to where the full notice is found.\n\n    <one line to give the program's name and a brief idea of what it does.>\n    Copyright (C) <year>  <name of author>\n\n    This program is free software: you can redistribute it and/or modify\n    it under the terms of the GNU General Public License as published by\n    the Free Software Foundation, either version 3 of the License, or\n    (at your option) any later version.\n\n    This program is distributed in the hope that it will be useful,\n    but WITHOUT ANY WARRANTY; without even the implied warranty of\n    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program.  If not, see <https://www.gnu.org/licenses/>.\n\nAlso add information on how to contact you by electronic and paper mail.\n\n  If the program does terminal interaction, make it output a short\nnotice like this when it starts in an interactive mode:\n\n    <program>  Copyright (C) <year>  <name of author>\n    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.\n    This is free software, and you are welcome to redistribute it\n    under certain conditions; type `show c' for details.\n\nThe hypothetical commands `show w' and `show c' should show the appropriate\nparts of the General Public License.  
Of course, your program's commands\nmight be different; for a GUI interface, you would use an \"about box\".\n\n  You should also get your employer (if you work as a programmer) or school,\nif any, to sign a \"copyright disclaimer\" for the program, if necessary.\nFor more information on this, and how to apply and follow the GNU GPL, see\n<https://www.gnu.org/licenses/>.\n\n  The GNU General Public License does not permit incorporating your program\ninto proprietary programs.  If your program is a subroutine library, you\nmay consider it more useful to permit linking proprietary applications with\nthe library.  If this is what you want to do, use the GNU Lesser General\nPublic License instead of this License.  But first, please read\n<https://www.gnu.org/licenses/why-not-lgpl.html>."
  },
  {
    "path": "README.md",
    "content": "# HIPRT-Path-Tracer\n\n![HIPRT path tracer cover](README_data/img/Bistro.jpg)\n\nPhysically based unidirectional (backwards) Monte Carlo path tracer written with the [HIPRT](https://gpuopen.com/hiprt/) and [Orochi](https://gpuopen.com/orochi/) libraries.\n\nHIPRT is AMD's equivalent to [OptiX](https://developer.nvidia.com/rtx/ray-tracing/optix). It allows the use of the ray tracing accelerators of RDNA2+ AMD GPUs and can run on NVIDIA devices as well (although it wouldn't take advantage of RT cores) as it is not AMD specific. \n\nThe Orochi library allows the loading of HIP and CUDA libraries at runtime meaning that the application doesn't have to be recompiled to be used on a GPU from a different vendor (unlike HIP alone which, despite being compatible with NVIDIA and AMD hardware, would require a recompilation).\n\n# System requirements\n\n- AMD RDNA1 GPU or newer (RX 5000 or newer) **or** NVIDIA Maxwell GPU or newer (GTX 700 & GTX 900 Series or newer)\n- Visual Studio 2022 (only version tested but older versions might work as well) on Windows\n- CMake\n- CUDA for NVIDIA compilation\n# Features:\n\n### Layered Principled BSDF:\n- Coat Microfacet GGX Layer + Anisotropy, Anisotropy Rotation, Medium Absorption & Thickness\n- SGGX Volumetric Sheen Lobe LTC Fit [\\[Zeltner, Burley, Chiang, 2022\\]](https://tizianzeltner.com/projects/Zeltner2022Practical/)\n- Specular Microfacet GGX Layer\n- Diffuse BRDF lobe. 
Support for:\n\t- Lambertian\n\t- Oren-nayar\n- Metallic Microfacet GGX Layer + Anisotropy & Anisotropy Rotation + Double Roughness [\\[Kulla & Conty, 2017\\]](https://blog.selfshadow.com/publications/s2017-shading-course/imageworks/s2017_pbs_imageworks_slides_v2.pdf)\n- Specular transmission BTDF + Beer Lambert Volumetric Absorption [\\[Burley, 2015\\]](https://blog.selfshadow.com/publications/s2015-shading-course/#course_content)\n- Diffuse lambertian BTDF\n- Spectral dispersion using Cauchy's equation\n- Multiple-scattering energy compensation for conductors (double metal layer), dielectrics (transmission layer) and glossy-diffuse (specular + diffuse layer) materials [\\[Turquin, 2019\\]](https://blog.selfshadow.com/publications/turquin/ms_comp_final.pdf)\n- Thin-film interference over dielectrics and conductors [\\[Belcour, Barla, 2017\\]](https://belcour.github.io/blog/research/publication/2017/05/01/brdf-thin-film.html)\n- Thin-walled model\n\n![LayeredBSDF](README_data/img/LayeredBSDF.png)\n\n![LayeredBSDF](README_data/img/metallic-energy.png)\n![LayeredBSDF](README_data/img/glass-energy.png)\n![LayeredBSDF](README_data/img/specular-diffuse-energy.png)\n### Sampling\n- Base light sampling techniques:\n\t- Uniform light sampling for direct lighting estimation + MIS\n\t- Power-proportional light sampling\n\t- ReGIR [\\[Boksansky et al., 2021\\]](https://cwyman.org/papers/rtg2-manyLightReGIR.pdf) augmented with:\n\t\t- Representative cell surface-data + integration with NEE++ for resampling according to the product **BRDF \\* L_i \\* G \\* V**\n\t\t- Spatial reuse\n\t\t- Hash grid\n\t\t- Per-cell RIS integral normalization factor pre-integration for multiple importance sampling support\n- Next-event estimation strategies (built on-top of base techniques):\n\t- MIS with BSDF sampling\n\t- Resampled Importance Sampling (RIS) [\\[Talbot et al., 2005\\]](https://www.researchgate.net/publication/220852928_Importance_Resampling_for_Global_Illumination)+ Weighted 
Reservoir Sampling (WRS) for many light sampling  + [\\[M. T. Chao, 1982\\]](https://www.jstor.org/stable/2336002)\n\t- ReSTIR DI\n\t- Next Event Estimation++ [\\[Guo et al., 2020\\]](https://graphics.tudelft.nl/Publications-new/2020/GEE20/GEE20-NEE++.pdf) + Custom envmap support\n\t- HDR Environment map + Multiple Importance Sampling using\n\t\t- CDF-inversion & binary search\n\t\t- Alias Table (Vose's O(N) construction [\\[Vose, 1991\\]](https://citeseerx.ist.psu.edu/document?repid=rep1&type=pdf&doi=f65bcde1fcf82e05388b31de80cba10bf65acc07))\n\t\n- BSDF sampling:\n\t- GGX NDF Sampling:\n\t\t- Visible Normal Distribution Function (VNDF) [\\[Heitz, 2018\\]](https://jcgt.org/published/0007/04/01/)\n\t\t- Spherical caps VNDF Sampling [\\[Dupuy, Benyoub, 2023\\]](https://arxiv.org/abs/2306.05044)\n\t\n- Path sampling:\n\t- BSDF Sampling:\n\t\t- One sample MIS for lobe sampling [\\[Hery et al., 2017\\]](https://graphics.pixar.com/library/PxrMaterialsCourse2017/paper.pdf)\n\t- ReSTIR GI [\\[Ouyang et al., 2021\\]](https://research.nvidia.com/publication/2021-06_restir-gi-path-resampling-real-time-path-tracing)\n\t- Experimental warp-wide direction reuse for improved indirect rays coherency [\\[Liu et al., 2023\\]](https://arxiv.org/abs/2310.07182)\n- ReSTIR Samplers:\n\t- ReSTIR DI [\\[Bitterli et al., 2020\\]](https://research.nvidia.com/labs/rtr/publication/bitterli2020spatiotemporal/)\n\t\t- Supports envmap sampling\n\t\t- Fused Spatiotemporal Reuse [\\[Wyman, Panteleev, 2021\\]](https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production)\n\t\t- Light Presampling [\\[Wyman, Panteleev, 2021\\]](https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production)\n\t- ReSTIR GI [\\[Ouyang et al., 2021\\]](https://research.nvidia.com/publication/2021-06_restir-gi-path-resampling-real-time-path-tracing)\n\t- Many bias correction weighting schemes:\n\t\t- 1/M\n\t\t- 1/Z\n\t\t- MIS-like,\n\t\t- 
Generalized balance heuristic\n\t\t- Pairwise MIS [\\[Bitterli, 2022\\]](https://digitalcommons.dartmouth.edu/dissertations/77/) & defensive formulation [\\[Lin et al., 2022\\]](https://research.nvidia.com/publication/2022-07_generalized-resampled-importance-sampling-foundations-restir))\n\t\t- Pairwise symmetric & asymmetric ratio MIS weights [\\[Pan et al., 2024\\]](https://diglib.eg.org/items/df9d727e-13a1-4d48-9275-57da7fb87f7f)\n\t- Adaptive-directional spatial reuse for improved offline rendering efficiency\n\t- Optimal visibility sampling [\\[Pan et al., 2024\\]](https://diglib.eg.org/items/df9d727e-13a1-4d48-9275-57da7fb87f7f)\n### Other rendering features\n- Microfacet Model Regularization for Robust Light Transport [\\[Jendersie et al., 2019\\]](https://jojendersie.de/wp-content/uploads/2013/06/2019_Jendersie_brdfregularization.pdf)\n- G-MoN - Adaptive median of means for unbiased firefly removal [\\[Buisine et al., 2021\\]](https://hal.science/hal-03201630v2)\n- Texture support for all the parameters of the BSDF\n- Texture alpha transparency support\n- Stochastic material opacity support\n- Normal mapping\n- Nested dielectrics support \n\t- Handling with priorities as proposed in [\\[Simple Nested Dielectrics in Ray Traced Images, Schmidt, 2002\\]](https://www.researchgate.net/publication/247523037_Simple_Nested_Dielectrics_in_Ray_Traced_Images)\n- A Low-Distortion Map Between Triangle and Square [\\[Heitz, 2019\\]](https://hal.science/hal-02073696v2/document)\n- Per-pixel variance based adaptive sampling\n- Intel [Open Image Denoise](https://github.com/RenderKit/oidn) + Normals & Albedo AOV support\n### UI\n- Interactive ImGui interface\n\t- Asynchronous interface to guarantee smooth UI interactions even with heavy path tracing kernels\n- Interactive first-person camera\n- Different frame-buffer visualization (visualize the adaptive sampling heatmap, converged pixels, the denoiser normals / albedo, ...)\n### Other features\n- Use of the 
[\[ASSIMP\]](https://github.com/assimp/assimp) library to support [many](https://github.com/assimp/assimp/blob/master/doc/Fileformats.md) scene file formats.\n- Multithreaded scene parsing/texture loading/shader compiling/BVH building/envmap processing/... for faster application startup times\n- Background-asynchronous path tracing kernels pre-compilation\n- Shader cache to avoid recompiling kernels unnecessarily\n\n# Building\n## Prerequisites\n### Windows\n#### - AMD GPUs\n\n1) Install the [HIP SDK](https://www.amd.com/en/developer/resources/rocm-hub/hip-sdk.html)\n\n2) Follow the \"[**Compiling**](#compiling)\" steps.\n\n#### - NVIDIA GPUs\n\nTo build the project on NVIDIA hardware, you will need to install the NVIDIA CUDA SDK v12.2 (minimum). It can be downloaded and installed from [here](https://developer.nvidia.com/cuda-downloads).\n\nYour `CUDA_PATH` environment variable then needs to be defined. \nThis should automatically be the case after installing the CUDA Toolkit but just in case, \nyou can define it yourself such that `CUDA_PATH/include/cuda.h` is a valid file path.\n\n### Linux\n\n#### - AMD GPUs\n\n1) Install OpenGL, GLFW and glew dependencies:\n\n```sh\nsudo apt install freeglut3-dev\nsudo apt install libglfw3-dev\nsudo apt install libglew-dev\n```\n\n2) Install AMD HIP (if you already have ROCm installed, you should have a `/opt/rocm` folder on your system and you can skip this step):\n\nDownload `amdgpu-install` package: https://rocm.docs.amd.com/projects/install-on-linux/en/latest/install/amdgpu-install.html\n\nInstall the package: \n\n```sh\nsudo apt install ./amdgpu-install_xxxx.deb\n```\n\nInstall HIP: \n\n```sh\nsudo amdgpu-install --usecase=hip\n```\n\n3) Normally, you would have to run the path tracer as `sudo` to be able to access GPGPU compute capabilities. 
However, you can save yourself the trouble by adding the user to the `render` group and **rebooting your system** :\n\n```sh\nsudo usermod -a -G render $LOGNAME\n```\n#### - NVIDIA GPUs\n\n1) Install OpenGL, GLFW and glew dependencies:\n\n```sh\nsudo apt install freeglut3-dev\nsudo apt install libglfw3-dev\nsudo apt install libglew-dev\nsudo apt install libomp-dev\n```\n\n2) Install the NVIDIA CUDA SDK (called \"CUDA Toolkit\"). It can be downloaded and installed from [here](https://developer.nvidia.com/cuda-downloads).\n## Compiling\n\nWith the pre-requisites fulfilled, you now just have to run the CMake:\n\n``` sh\ngit clone https://github.com/TomClabault/HIPRT-Path-Tracer.git --recursive\ncd HIPRT-Path-Tracer\nmkdir build\ncd build\ncmake -DCMAKE_BUILD_TYPE=Debug ..\n```\n\nOn Windows, a Visual Studio solution will be generated in the `build` folder that you can open and compile the project with (select `HIPRTPathTracer` as startup project).\n\nOn Linux, the `HIPRTPathTracer` executable will be generated in the `build` folder.\n\n## Usage\n\n`./HIPRT-Path-Tracer`\n\nThe following arguments are available:\n- `<scene file path>` an argument of the commandline without prefix will be considered as the scene file. File formats [supported](https://github.com/assimp/assimp/blob/master/doc/Fileformats.md).\n- `--sky=<path>` for the equirectangular skysphere used during rendering (HDR or not)\n- `--samples=N` for the number of samples to trace*\n- `--bounces=N` for the maximum number of bounces in the scene*\n- `--w=N` / `--width=N` for the width of the rendering*\n- `--h=N` / `--height=N` for the height of the rendering*\n\n\\* CPU only commandline arguments. 
These parameters are controlled through the UI when running on the GPU.\n\n# Gallery\n\n![DispersionDiamonds](README_data/img/DispersionDiamonds.jpg)![Bistro](README_data/img/Bistro.jpg)\n![P1 street](README_data/img/P1_environment.jpg)\n![Contemporary bedroom](README_data/img/contemporary-bedroom.jpg)![Blender 4.1 splash](README_data/img/blender-4.1-splash.jpg)\n![Dragon glass](README_data/img/dragon-glass.jpg)\n![Beeple Zero Day Measure Seven](README_data/img/bzd-measure-seven.jpg)\n![Lux Core Orbs](README_data/img/LuxCoreBalls.jpg)\n![Mitsuba Knob Sheen Dust](README_data/img/MitsubaSheenDustOrbs.jpg)\n![Dragon indirect lighting](README_data/img/DragonBTDF.jpg)![Dragon indirect lighting](README_data/img/pbrt-dragon-indirect-v2.jpg)\n![MIS vs. RIS vs. ReSTIR DI Comparison](README_data/img/RIS.ReSTIR.Comparison.jpg)\n![ImGui Interface](README_data/img/ImGuiDemo.jpg)\nSources of the scenes can be found [here](./SceneCredits.txt).\n# Live YouTube Demos\n\n### Material Editor Demo\n[![Material Editor Demo](./README_data/img/Material_editor_thumbnail.jpg)](https://www.youtube.com/watch?v=LOVBwOoLVVQ \"Material Editor Demo\")\n### OIDN AOVs Quality Comparison\n[![OIDN AOVs Comparison](./README_data/img/OIDN_AOVs_thumbnail.jpg)](https://www.youtube.com/watch?v=GnCi7K2w9go \"OIDN AOVs Comparison\")\n### ReSTIR DI vs. RIS vs. MIS Showcase\n[![ReSTIR DI Showcase](./README_data/img/ReSTIR_DI_Showcase_thumbnail.jpg)](https://www.youtube.com/watch?v=R6nkhSDoJ4U \"ReSTIR DI vs. RIS vs. MIS Showcase\")\n### Thin-film iridescence render\n[![OIDN AOVs Comparison](./README_data/img/thin-film-iri-thumbnail.jpg)](https://www.youtube.com/watch?v=rGwkacGbd3g \"Thin-film iridescence render\")\n# License\n\nGNU General Public License v3.0 or later\n\nSee [COPYING](https://github.com/TomClabault/HIPRT-Path-Tracer/blob/main/COPYING) to see the full text.\n"
  },
  {
    "path": "README_data/Features/features.md",
    "content": "### TODO\n- Disney BSDF (Diffuse, fake subsurface, metallic, roughness, anisotropy + anisotropy rotation, clearcoat, sheen, glass, volumetric Beer-Lambert absorption, ...) \\[Burley, 2015\\]\n\t- For experimentation purposes, the BRDF diffuse lobe can be switched for either:\n\t\t- The original \"Disney diffuse\" presented in [\\[Burley, 2012\\]](https://disneyanimation.com/publications/physically-based-shading-at-disney/)\n\t\t- A lambertian distribution\n\t\t- The Oren Nayar microfacet diffuse model.\n### TODO\n- Texture support for all the parameters of the BSDF\n### TODO\n\n- BSDF Direct lighting multiple importance sampling\n### TODO\n\n- HDR Environment map + importance sampling using\n\t- CDF-inversion binary search\n### TODO\n- Emissive geometry light sampling\n\n### TODO\n- Nested dielectrics support \n\t- Automatic handling as presented in \\[Ray Tracing Gems, 2019\\]\n\t- Handling with priorities as proposed in \\[Simple Nested Dielectrics in Ray Traced Images, Schmidt, 2002\\]\n### Per-pixel adaptive sampling\n\nAdaptive sampling is a technique that allows focusing the samples on pixels that need more of them. This is useful because not all parts of a scene are equally complex to render.\n\nConsider this modified cornell box for example:\n\n![Cornell box PBR reflective caustic reference](./img/cornell_pbr_reference.jpg)\n\nHalf of the rays of this scene don't even intersect any geometry and directly end up in the environment where the color of the environment map is computed. The variance of the radiance of these rays is very low since a given camera ray direction basically always results in the same radiance (almost) being returned.\n\nHowever, the same cannot be said for the reflective caustic (the emissive light panel reflecting off the mirror small box) at the top right of the Cornell box. 
A camera ray that hits this region of the ceiling then has a fairly low chance of bouncing in direction of the small box to then bounce directly in the direction of the light. This makes the variance of these rays very high which really slows down the convergence of this part of the scene. As a result, we would like to shoot more rays at these pixels than at other parts of the scene.\n\nAdaptive sampling allows us to do just that. The idea is to estimate the error of each pixel of the image, compare this estimated error with a user-defined threshold $T$ and only continue to sample the pixel if the pixel's error is still larger than the threshold.\n\nA very simple error metric is that of the variance of the luminance $\\sigma^2$ of the pixel. In practice, we want to estimate the variance of a pixel across the $N$ samples $x_k$ it has received so far. \n\nThe variance of $N$ samples is usually computed as:\n$$\\sigma^2 = \\frac{1}{N}\\sum_{k=1}^N (x_k - \\mu) ^2$$\nHowever, this approach would imply keeping the average of each pixel's samples (which is the framebuffer itself so that's fine) as well as the values of all samples (that's not fine). Every time we want to estimate the error of a single pixel, we would then have to loop over all the previous samples to compute their difference with the average and get our variance $\\sigma^2$. Keeping track of all the samples is infeasible in terms of memory consumption (that would be 2GB of RAM/VRAM for a mere 256 samples' floating-point luminance at 1080p) and looping over all the samples seen so far is computationally way too demanding.\n\nThe practical solution is to evaluate the running-variance of the $N$ pixel samples $x_k$:\n$$\\sigma^2 = \\frac{1}{N - 1} \\left(\\sum_{k=1}^N x_k^2 - \\frac{1}{N}\\left( \\sum_{k=1}^N x_k \\right)^2\\right)$$\n  *Note that due to the nature of floating point numbers, this formula can have some precision issues. 
[This](https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm) Wikipedia article presents good alternatives.*\n\nWith the variance, we can compute a 95% confidence interval $I$:\n$$I = 1.96 \\frac{\\sigma}{\\sqrt{N}}$$\n This 95% confidence interval gives us a range around our samples mean $\\mu$ and we can be 95% sure that, for the current number of samples $N$ and their variance $\\sigma$ that we used to compute this interval, the converged mean (true mean) of an infinite amount of samples is in that interval.\n\n![Confidence interval visualization](./img/confidenceInterval.png)\n\n*Visualization of the confidence interval **I** (green arrows) around **µ**.*\n\nJudging by how $I$ is computed, it is easy to see that as the number of samples $N$ increases or the variance $\\sigma^2$ decreases (and thus $\\sigma$ decreases too), $I$ decreases. \n\nThat should make sense since as we increase the number of samples, our mean $\\mu$ should get closer and closer to the \"true\" mean value of the pixel (which is the value of the fully converged pixel when an infinite amount of samples are averaged together). \n\nIf $I$ gets smaller, this means for our $\\mu$ that it also gets closer to the \"true\" mean and that is the sign that our pixel has converged a little more.\n\n![Confidence interval smaller visualization](./img/confidenceInterval2.png)\n\n*As the number of samples increases (or as the computed variance decreases), **I** gets smaller, meaning that the true mean is closer to our current mean which in turn means that our pixel has converged a little more.*\n\nKnowing that we can interpret $I$ as a measure of the convergence of our pixel, the question now becomes: \n\n**When do we assume that our pixel has sufficiently converged and stop sampling?**\n\nWe use that user-given threshold $T$ we talked about earlier! Specifically, we can assume that if:\n\n$$I \\leq T\\mu$$\nThen that pixel has converged enough for that threshold $T$. 
As a practical example, consider $T=0$. We then have:\n\n``` math\n\\displaylines{I \\leq T\\mu \\\\ I \\leq 0}\n```\nIf $I =0$, then the interval completely collapses on $\\mu$. Said otherwise, $\\mu$ **is** the true mean and our pixel has completely converged. Thus, for $T=0$, we will only stop sampling the pixel when it has fully converged.\n\nIn practice, having $I=0$ is infeasible. After some experimentations a $T$ threshold of $0.1$ seems to target a visually very reasonable amount of noise. Any $T$ lower than that represents quite the overhead in terms of rendering times but can still provide some improvements on the perceived level of noise:\n\n![cornellThreshold](./img/cornellThreshold.jpg)\n*Comparison of the noise level obtained after all pixels have converged and stopped sampling with a varying **T** threshold*\n\nNow if you look at the render with $T=0.1$, you'll notice that the caustic on the ceiling is awkwardly noisier than the rest of the image. There are some \"holes\" in the caustic (easy to see when you compare it to the $T=0.05$ render).\n\nThis is an issue of the per-pixel approach used here: because that caustic has so much variance, it is actually possible that we sample a pixel on the ceiling 50 times (arbitrary number) without ever finding a path to the light. The sampled pixel will then remain gray-ish (diffuse color of the ceiling) instead of being bright because of the caustic. Our evaluation of the error of this pixel will then assume that it has converged since it has gone through 50 samples without that much of a change in radiance, meaning that it has a low variance, meaning that we can stop sampling it. \n\nBut we shouldn't! 
If we had sampled it maybe 50 more times, we would have probably found a path that leads to the light, spiking the variance of the pixel which in turn would be sampled until the variance has attenuated enough so that our confidence interval $I$ is small again and gets below our threshold.\n\nOne solution is simply to increase the minimum number of samples that must be traced through a pixel before evaluating its error. This way, the pixels of the image all get a chance to show their true variance and can't escape the adaptive sampling strategy! \n\n![minimumSampleNumber](./img/minimumSampleNumber.jpg)\n*Impact of the minimum amount of samples to trace before starting evaluating adaptive sampling for the same **T** threshold.*\n\nThis is however a poor solution since this forces all pixels of the image to be sampled at least 100 times, even the ones that would only need 50 samples. This is a waste of computational resources.\n\nA better way of estimating the error of the scene is presented in the \"Hierarchical Adaptive Sampling\" section.\n\nNonetheless, this naive way of estimating the error of a pixel can provide very appreciable speedups in rendering time:\n\n![Adaptive Sampling Speedup](./img/testedScenes.jpg) \n\nThe application also offers the possibility to visualize where the rays are being concentrated on the image thanks to a heatmap (based on the number of rays per pixel):\n\n![Adaptive sampling heatmap](./img/heatmap.jpg)\n### TODO\n- Hierarchical adaptive sampling\n\n### Normal mapping\nNormal mapping (or bump mapping) is a technique that aims at visually improving perceived geometric details without actually having the geometry for it. This is done through the use of normal maps which are textures that look like this:\n\n<p align=\"center\">\n  <img src=\"./img/normalMap.jpg\" />\n</p>\n\n*An example normal map.*\n\nEach pixel of this texture represents a perturbation of the geometric normal of the surface. 
Because the lighting of a surface strongly depends on its orientation (its normal), if the normal of the surface is altered, then the lighting will be too.\n\nThe three channels RGB of a pixel of the texture respectively represent the X, Y and Z coordinates of the perturbed normal. However, you cannot just read from the texture using texture coordinates and assume that the RGB values of the pixel you get are going to be 1:1 the new normal of your surface:\n\t- The pixel values are in $[0, 1]$ (or $[0, 255]$ if you prefer) but a normal is in $[-1, 1]$\n\t- The normals of the texture are in their own coordinate space called tangent space. They are not in the same space as your model. They will have to be transformed.\n\nBringing the pixel from $[0, 1]$ to the tangent space normal in $[-1, 1]$ is fairly straightforward:\n\n$$N_{TS} = Pixel * 2 - 1$$\nThe more interesting question is how to bring the normal from tangent space to the coordinate space of our model (and then the world) so that we can actually use our normal for the lighting calculations. To do that, we're going to need a transformation matrix, also called an ONB (Orthonormal Basis) in this case. This matrix will let us bring the tangent space normal to model space (a change of basis).\n\n<p align=\"center\">\n  <img src=\"./img/normalMappingTBN.jpg\" />\n</p>\n\n*TBN vectors used for the ONB matrix calculation. Illustration from* [LearnOpenGL](https://learnopengl.com/Advanced-Lighting/Normal-Mapping).\n\nBut how do we find that matrix?\n\nThe matrix is going to be built from three vectors: $T$, $B$ and $N$. $T$ and $B$ are called the tangent and bitangent vectors (depicted in the figure above). They represent the $X$ and $Y$ coordinates of our tangent space. $N$ is the geometric normal of our surface (or smooth normal if you're using interpolated vertex normals), it is the $Z$ coordinate of our tangent space.\n\n*Sidenote: you may have noticed that normal maps are blue-ish in general. 
This is due to the normals being mostly oriented towards the **Z** axis (which is the blue channel of the pixel) of the tangent space which is the normal of our surface. Since a normal map represents perturbations of the surface normal, it is expected that the normal map is going to be mostly the normal of our surface itself.*\n\nThe goal is then to find these $T$ and $B$ vectors. We know that these two vectors are aligned with the $U$ and $V$ directions of the texture respectively. If $p_1$, $p_2$ and $p_3$ are the three vertices in counter-clockwise order of the triangle that we intersected and they have $UV_1=(u_1, v_1)$, $UV_2=(u_2, v_2)$ and $UV_3=(u_3, v_3)$ for texture coordinates respectively, we can define two of the edges $e_1$ and $e_2$ of our triangle simply as:\n\n```math\n\\displaylines{e_1 = p_2-p_1 \\\\ e_2 = p_3-p_2}\n```\n\n*Sidenote again: Note that the **T** and **B** we're computing **need** to be aligned with the **U** and **V** directions of the texture. A generic algorithm ([Duff, 2017](https://graphics.pixar.com/library/OrthonormalB/paper.pdf) for example) for finding arbitrary tangent and bitangent vectors to a normal cannot be used here. The process of building the ONB for normal mapping here isn't the same as when building an ONB of the \"shading space\" for BSDF evaluation.*\n\nSimilarly, we can define the differences $\\Delta UV_1$ and $\\Delta UV_2$ in texture coordinates of these vertices:\n\n```math\n\\displaylines{\\Delta UV_1 = (\\Delta U_1, \\Delta V_1)=UV_2-UV_1 \\\\ \\Delta UV_2 = (\\Delta U_2, \\Delta V_2) = UV_3-UV_2}\n```\n\n<p align=\"center\">\n  <img src=\"./img/normalMappingE1E2.jpg\" />\n</p>\n\n***e1** and **e2** can be expressed in terms of **ΔU\\*T** and **ΔV\\*B**. 
Illustration from* [LearnOpenGL](https://learnopengl.com/Advanced-Lighting/Normal-Mapping).\n\nThese $\\Delta UV_1$ and $\\Delta UV_2$ can be understood as the edges $e_1$ and $e_2$ but expressed in the coordinate space of the texture, the $UV$ coordinate space. Because we know that $UV$ coordinates are aligned with the $T$ and $B$ vectors that we're looking for (remember the \"TBN vectors used for the ONB matrix calculation\" illustration), we can therefore express $e_1$ and $e_2$ in terms of $\\Delta UV_1$, $\\Delta UV_2$, $T$ and $B$:\n\n```math\n\\displaylines{e_1 = T*\\Delta U_1 + B*\\Delta V_1 \\\\ e_2 = T*\\Delta U_2 + B*\\Delta V_2}\n```\n\nIn matrix form, this can be written as:\n```math  \n\\begin{bmatrix}\n\\uparrow & \\uparrow \\\\\ne_1 & e_2 \\\\\n\\downarrow & \\downarrow\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n\\uparrow & \\uparrow \\\\\nT & B \\\\\n\\downarrow & \\downarrow\n\\end{bmatrix}\n\\begin{bmatrix}\n\\Delta U_1 & \\Delta U_2 \\\\\n\\Delta V_1 & \\Delta V_2\n\\end{bmatrix}\n```\n\nWe can then solve for $T$ and $B$ by multiplying by the inverse of the $\\left[ \\Delta U_1 \\Delta U_2, \\Delta V_1 \\Delta V_2 \\right]$ matrix:\n\n```math  \n\\begin{bmatrix}\n\\uparrow & \\uparrow \\\\\nT & B \\\\\n\\downarrow & \\downarrow\n\\end{bmatrix}\n=\n\\begin{bmatrix}\n\\uparrow & \\uparrow \\\\\ne_1 & e_2 \\\\\n\\downarrow & \\downarrow\n\\end{bmatrix}\n\\frac{1}{\\Delta U_1\\Delta V_2 -\\Delta V_1\\Delta U_2}\n\\begin{bmatrix}\n\\Delta V_2 & -\\Delta U_2 \\\\\n-\\Delta V_1 & \\Delta U_1\n\\end{bmatrix}\n```\n\nThis stems from the fact that the inverse of a 2x2 matrix is given by:\n\n```math  \n\\begin{bmatrix}\na & b \\\\\nc & d\n\\end{bmatrix}^{-1}\n=\n\\frac{1}{ad-bc}\n\\begin{bmatrix}\nd & -b \\\\\n-c & a\n\\end{bmatrix}\n```\n\n*Sidenote yet again: when the determinant $ad-bc$ of the matrix is equal to 0, we're getting a division by 0 in the fraction and we cannot compute the inverse of the matrix. 
This is why matrices that have a determinant equal to 0 cannot be inverted. Such a matrix is said to be singular.*\n\nOur $T$ and $B$ vectors now computed, the TBN matrix that will allow us to pass from the normal of our normal map (tangent space) to the model-space is given by:\n\n```math\nMat_{TBN} = \n\\begin{bmatrix}\n\\uparrow & \\uparrow & \\uparrow \\\\\nT & B & N \\\\\n\\downarrow & \\downarrow & \\downarrow\n\\end{bmatrix}\n```\n\nThe final normal that we can use for our shading is thus:\n\n$$N_{shading}=Mat_{TBN}*N_{TS}$$\n\nTODO visual impact\n### Interactive ImGui Interface & FPS Camera\n\nWhen rendering on the GPU, an ImGui interface is available to help playing with the parameters of the path tracer.\n\nThe goal of the interface really is to allow experimentation in terms of performance and visual impact.\n\n![ImGui interface](./img/imguiInterface.jpg)\n\nThe GUI also offers a first-person camera to move around the scene:\n- Right click to pan\n- Left click for rotating the view\n- Mouse wheel for zooming in/out\n### Visualization\n\nAgain with the goal of experimenting and better understanding what is happening under the hood, the \"Display view\" option in the ImGui interface under \"Render settings\" allows changing what the viewport is displaying. 
For example, the AOVs (Arbitrary Output Values, which are additional data fed to the denoiser to help it denoise better) of the denoiser such as the normals and albedo color of the scene can be visualized (this can also serve for debugging and making sure everything is in order)\n\n![Denoiser normal visualization](./img/denoiserAlbedoNormal.jpg)\n\nMore visualization options are available (adaptive sampling heatmap as used in the [adaptive sampling section](#per-pixel-adaptive-sampling) is one of them), have a look at them in the app!\n\n### ASSIMP\n\n[ASSIMP](https://github.com/assimp/assimp) is a library that provides a uniform interface for parsing [many](https://github.com/assimp/assimp/blob/master/doc/Fileformats.md) different file formats. Although not all extensions of some important file formats are supported (ASSIMP doesn't seem to be recognizing the PBR extension of OBJ (\"aniso\" keyword issue) files and doesn't support all GLTF 2.0 extensions for example), ASSIMP vastly improves the range of scene files supported by the application.\n\n### TODO\n - Optimized application startup time with:\n\t- Multithreaded texture loading\n\t- Asynchronous path tracing kernel compilation\n### TODO\n- Intel Open Image Denoise + Normals & Albedo AOV support\n\n## TODO\n\nFilter functions\n"
  },
  {
    "path": "README_data/img/LayeredBSDF.drawio",
    "content": "<mxfile host=\"app.diagrams.net\" agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:134.0) Gecko/20100101 Firefox/134.0\" version=\"25.0.3\">\n  <diagram name=\"Page-1\" id=\"_YB4e4YoaT3FMhfF6Ti6\">\n    <mxGraphModel dx=\"576\" dy=\"307\" grid=\"1\" gridSize=\"1\" guides=\"1\" tooltips=\"1\" connect=\"1\" arrows=\"1\" fold=\"1\" page=\"1\" pageScale=\"1\" pageWidth=\"827\" pageHeight=\"1169\" background=\"#0d1117\" math=\"0\" shadow=\"0\">\n      <root>\n        <mxCell id=\"0\" />\n        <mxCell id=\"1\" parent=\"0\" />\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-22\" value=\"\" style=\"rounded=1;whiteSpace=wrap;html=1;strokeWidth=1;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"324\" y=\"322\" width=\"426\" height=\"39\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-21\" value=\"\" style=\"rounded=1;whiteSpace=wrap;html=1;strokeWidth=1;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"546\" y=\"411\" width=\"138\" height=\"55\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-2\" value=\"&lt;font style=&quot;font-size: 11px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Sheen&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#C7C7C7;fontColor=#333333;strokeColor=#666666;gradientColor=#FFFFFF;gradientDirection=radial;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"324\" y=\"365\" width=\"360\" height=\"27\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-5\" value=\"&lt;font face=&quot;Kollektif&quot;&gt;2x GGX Metal&lt;br&gt;&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;gradientColor=#FFCE9F;gradientDirection=west;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"323\" y=\"413\" width=\"96\" height=\"61\" as=\"geometry\" />\n        </mxCell>\n        <mxCell 
id=\"LqvaC12SgwfPoJ_6hcm2-10\" value=\"&lt;p style=&quot;line-height: 120%;&quot;&gt;&lt;font style=&quot;font-size: 11px;&quot; face=&quot;Kollektif&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot;&gt;Specular&lt;/font&gt;&lt;/p&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#f5f5f5;fontColor=#333333;strokeColor=#666666;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"425\" y=\"413\" width=\"116\" height=\"24\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-11\" value=\"&lt;div&gt;&lt;font style=&quot;font-size: 11px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Diffuse&amp;nbsp;&lt;/font&gt;&lt;/div&gt;&lt;div&gt;&lt;font style=&quot;font-size: 11px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Diffuse transmission&lt;br&gt;&lt;/font&gt;&lt;/div&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#eff7ee;strokeColor=#82b366;gradientColor=#9ac798;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"425\" y=\"441\" width=\"116\" height=\"33\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-12\" value=\"&lt;font style=&quot;font-size: 10px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Transmission&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#FFFFFF;strokeColor=none;gradientColor=#FFFFFF;gradientDirection=south;arcSize=36;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"546\" y=\"413\" width=\"138\" height=\"22\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-13\" value=\"&lt;font style=&quot;font-size: 9px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Homogeneous &lt;/font&gt;&lt;font style=&quot;font-size: 
9px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Volumetric Absorption&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#A9C4EB;strokeColor=none;gradientColor=#FFFFFF;gradientDirection=north;arcSize=27;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"546\" y=\"435\" width=\"138\" height=\"39\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-14\" value=\"&lt;font style=&quot;font-size: 11px;&quot; face=&quot;Kollektif&quot;&gt;Emission&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#fff2cc;strokeColor=#d6b656;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"690\" y=\"366\" width=\"60\" height=\"108\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-18\" value=\"\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=none;fontColor=#333333;strokeColor=#9F9F9F;strokeWidth=1;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"546\" y=\"411\" width=\"138\" height=\"63\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-20\" value=\"\" style=\"group\" parent=\"1\" vertex=\"1\" connectable=\"0\">\n          <mxGeometry x=\"324\" y=\"323\" width=\"426\" height=\"38\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-16\" value=\"&lt;div style=&quot;font-size: 11px;&quot;&gt;&lt;font style=&quot;font-size: 11px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Coat&lt;/font&gt;&lt;/div&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;gradientDirection=radial;glass=0;rotation=0;strokeWidth=0;perimeterSpacing=1;strokeColor=none;\" parent=\"LqvaC12SgwfPoJ_6hcm2-20\" vertex=\"1\">\n          <mxGeometry width=\"426\" height=\"20\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-17\" value=\"&lt;div style=&quot;font-size: 
11px;&quot;&gt;&lt;font style=&quot;font-size: 11px;&quot; data-font-src=&quot;https://fonts.googleapis.com/css?family=Kollektif&quot; face=&quot;Kollektif&quot;&gt;Coat Medium Absorption&lt;br&gt;&lt;/font&gt;&lt;/div&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;gradientDirection=south;gradientColor=#EF958F;perimeterSpacing=0;glass=1;imageWidth=34;imageHeight=3;fillColor=default;strokeWidth=0;arcSize=26;strokeColor=none;\" parent=\"LqvaC12SgwfPoJ_6hcm2-20\" vertex=\"1\">\n          <mxGeometry y=\"18\" width=\"426\" height=\"20\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"LqvaC12SgwfPoJ_6hcm2-19\" value=\"\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=none;fontColor=#333333;strokeColor=#666666;strokeWidth=1;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"324\" y=\"322\" width=\"426\" height=\"39\" as=\"geometry\" />\n        </mxCell>\n        <mxCell id=\"zKmTrmhVeMyoOnWYh9cR-2\" value=\"&lt;font style=&quot;font-size: 9px;&quot;&gt;Thin-film interferences&lt;br style=&quot;font-size: 9px;&quot;&gt;&lt;/font&gt;\" style=\"rounded=1;whiteSpace=wrap;html=1;fillColor=#BDCAFC;strokeColor=#6c8ebf;gradientDirection=east;gradientColor=#FF99CC;glass=0;verticalAlign=middle;fontSize=9;fontFamily=Kollektif;fontSource=https%3A%2F%2Ffonts.googleapis.com%2Fcss%3Ffamily%3DKollektif;\" parent=\"1\" vertex=\"1\">\n          <mxGeometry x=\"324\" y=\"395\" width=\"360\" height=\"13\" as=\"geometry\" />\n        </mxCell>\n      </root>\n    </mxGraphModel>\n  </diagram>\n</mxfile>\n"
  },
  {
    "path": "SceneCredits.txt",
    "content": "Bistro: https://developer.nvidia.com/orca/amazon-lumberyard-bistro\nBeeple Zero Day: https://developer.nvidia.com/orca/beeple-zero-day\nBlender 4.1 Splash: https://www.blender.org/download/demo-files/\nContemporary bedroom: https://www.cgtrader.com/free-3d-models/interior/bedroom/bedroom-interior-contemporer\nGlass dragon: https://benedikt-bitterli.me/resources/\nSuzanne Caustics (old render): Just Suzanne from Blender with some very small light inside of it\nMcLaren P1 + Environment: https://sketchfab.com/3d-models/mclaren-p1-6d6072b79ae444058a8f7c7ffd548fb4#download + https://www.cgtrader.com/items/4740776/download-page\nPBRT Dragon Indirect Lighting: https://www.pbrt.org/scenes-v3\nRolex: https://www.cgtrader.com/items/4228612/download-page\nLuxCoreBalls: https://luxcorerender.org/example-scenes/\nMitsuba-knob: https://casual-effects.com/data/\nPorsche 718 Cayman GT4: https://www.cgtrader.com/free-3d-models/car/sport-car/porsche-718-cayman-gt4-5734530d-8afb-4852-9421-b71bde9adce3\n"
  },
  {
    "path": "cmake/Clip.cmake",
    "content": "add_subdirectory(thirdparties/clip)\n\n# Enabling CLIP's image support\nadd_compile_definitions(CLIP_ENABLE_IMAGE=1)\n"
  },
  {
    "path": "cmake/SetupASSIMP.cmake",
    "content": "# We're going to disable shared libs for assimp but we need to save\n# the current value of BUILD_SHARED_LIBS before overriding it with\n# OFF (for assimp only)\nset(BUILD_SHARED_LIBS_BACKUP ${BUILD_SHARED_LIBS})\nset(CMAKE_BUILD_TYPE_BACKUP ${CMAKE_BUILD_TYPE})\nset(BUILD_SHARED_LIBS OFF)\nset(ASSIMP_NO_EXPORT ON)\nset(ASSIMP_BUILD_TESTS OFF)\nset(ASSIMP_INSTALL_PDB OFF)\nset(ASSIMP_BUILD_ZLIB ON)\nset(ASSIMP_BUILD_ASSIMP_VIEW OFF)\n\nset(ASSIMP_SUBMODULE_DIR ${CMAKE_SOURCE_DIR}/thirdparties/ASSIMP-Fork)\n\nif(NOT EXISTS ${ASSIMP_SUBMODULE_DIR}/code)\n\t# Making sure that the ASSIMP submodule was cloned\n\tmessage(FATAL_ERROR \"The ASSIMP submodule couldn't be found. Did you forget to clone the submodules? Run 'git submodule update --init --recursive'.\")\nendif()\n\nadd_subdirectory(${ASSIMP_SUBMODULE_DIR})\n\n# Restoring variables\nset(BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS_BACKUP})\nset(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE_BACKUP})\n"
  },
  {
    "path": "cmake/SetupHIPRT.cmake",
    "content": "set(HIPRT_SUBMODULE_DIR ${CMAKE_SOURCE_DIR}/thirdparties/HIPRT-Fork)\n\nif(NOT EXISTS ${HIPRT_SUBMODULE_DIR}/hiprt)\n\t# Making sure that the HIPRT submodule was cloned\n\tmessage(FATAL_ERROR \"The HIPRT submodule couldn't be found. Did you forget to clone the submodules? Run 'git submodule update --init --recursive'.\")\nendif()\n\nset(NO_ENCRYPT ON)\nset(NO_UNITTEST ON)\n#option(HIPRT_PREFER_HIP_5 \"Prefer HIP 5\" OFF)\n\nset(CMAKE_EXE_LINKER_FLAGS_DEBUGGPU \"\")\nadd_subdirectory(${HIPRT_SUBMODULE_DIR})\n\n# Now that we built HIPRT, we can set the variables that will be used in the rest of the CMake \n# to find the headers, the libraries, ...\nset(HIPRT_BIN_DIR ${HIPRT_SUBMODULE_DIR}/dist/bin/${CMAKE_BUILD_TYPE})\nset(HIPRT_HEADERS_DIR ${HIPRT_SUBMODULE_DIR}/hiprt)\n\n# The GPU compiler will need this additional include folder to properly compile some kernels\nadd_compile_definitions(KERNEL_COMPILER_ADDITIONAL_INCLUDE=\"${HIPRT_SUBMODULE_DIR}\")\n\n# Replacing backslashes in the Windows paths that lead to wrong escape character\n# note that the four backslashes \\\\\\\\ are required because we need a regular expression that\n# compiles to '\\'.\n# \\\\ is converted by CMake to a single '\\'\n# so \\\\\\\\ is converted by CMake to '\\\\' which is the regular expression for the single '\\' character\nSTRING(REGEX REPLACE \"\\\\\\\\\" \"/\" HIPRT_HEADERS_DIR ${HIPRT_HEADERS_DIR})\n\t\nlink_directories(${HIPRT_BIN_DIR})\n"
  },
  {
    "path": "cmake/SetupOIDN.cmake",
    "content": "if (WIN32)\n\tset(OIDN_URL https://github.com/RenderKit/oidn/releases/download/v2.3.0/oidn-2.3.0.x64.windows.zip)\nelseif(UNIX)\n\tset(OIDN_URL https://github.com/RenderKit/oidn/releases/download/v2.3.0/oidn-2.3.0.x86_64.linux.tar.gz)\nendif()\n\nFetchContent_Declare(\n\toidnbinaries\n\tURL      ${OIDN_URL}\n)\n\nFetchContent_MakeAvailable(\n\toidnbinaries\n)\n"
  },
  {
    "path": "cmake/SetupOrochi.cmake",
    "content": "set(OROCHI_SUBMODULE_DIR ${CMAKE_SOURCE_DIR}/thirdparties/Orochi-Fork)\n\nif(NOT EXISTS ${OROCHI_SUBMODULE_DIR}/ParallelPrimitives)\n\t# Making sure that the Orochi submodule was cloned\n\tmessage(FATAL_ERROR \"The Orochi submodule couldn't be found. Did you forget to clone the submodules? Run 'git submodule update --init --recursive'.\")\nendif()\n\nset(OROCHI_BIN_DIR ${OROCHI_SUBMODULE_DIR})\nset(OROCHI_SOURCES_DIR ${OROCHI_SUBMODULE_DIR}/Orochi)\n\nset(CUEW_SOURCES_DIR ${OROCHI_SUBMODULE_DIR}/contrib/cuew)\nset(HIPEW_SOURCES_DIR ${OROCHI_SUBMODULE_DIR}/contrib/hipew)\n\nSTRING(REGEX REPLACE \"\\\\\\\\\" \"/\" OROCHI_SOURCES_DIR ${OROCHI_SOURCES_DIR})\nSTRING(REGEX REPLACE \"\\\\\\\\\" \"/\" CUEW_SOURCES_DIR ${CUEW_SOURCES_DIR})\nSTRING(REGEX REPLACE \"\\\\\\\\\" \"/\" HIPEW_SOURCES_DIR ${HIPEW_SOURCES_DIR})\n\n# TODO remove when issue #7 (https://github.com/GPUOpen-LibrariesAndSDKs/HIPRT/issues/7) is fixed\nif (true)\n\tfile(COPY ${OROCHI_SUBMODULE_DIR}/ParallelPrimitives DESTINATION ${CMAKE_SOURCE_DIR}/contrib/Orochi)\n\tfile(COPY ${HIPRT_SUBMODULE_DIR}/hiprt/impl DESTINATION ${CMAKE_SOURCE_DIR}/hiprt)\n\n\tfile(GLOB HIPRT_FILES_TO_COPY ${HIPRT_SUBMODULE_DIR}/hiprt/*.h ${HIPRT_SUBMODULE_DIR}/hiprt/*.in)\n\tfile(COPY ${HIPRT_FILES_TO_COPY} DESTINATION ${CMAKE_SOURCE_DIR}/hiprt/)\nendif()\n"
  },
  {
    "path": "cmake/SetupTracy.cmake",
    "content": "add_subdirectory(thirdparties/tracy)\n\nset(DISABLE_TRACY_PROFILING ON)\n\nif (DISABLE_TRACY_PROFILING)\n\tget_target_property(TRACY_INTERFACE TracyClient INTERFACE_COMPILE_DEFINITIONS)\n\tlist(REMOVE_ITEM TRACY_INTERFACE \"TRACY_ENABLE\")\n\tset_target_properties(TracyClient PROPERTIES INTERFACE_COMPILE_DEFINITIONS \"${TRACY_INTERFACE}\")\nendif()\n"
  },
  {
    "path": "data/BRDFsData/GGX/Glass/ExponentCorrection.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 110,\n   \"id\": \"6992e79b-eec0-4bd5-8e69-13e2a002e919\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"relative_eta * -13.439691641502959+roughness * -14.648819997830152+relative_eta*relative_eta * 6.8789072129524165+relative_eta * roughness * 2.277848368999256+roughness*roughness * 30.73190238514913+relative_eta*relative_eta*relative_eta * -1.1621500927731714+relative_eta*relative_eta * roughness * 0.37498830694981306+relative_eta * roughness*roughness * 0.8224134608255266+roughness*roughness*roughness * -21.325291375291403+\\n\",\n      \"\\n\",\n      \"Fitting Errors:\\n\",\n      \"Mean Squared Error (MSE): 1.513004991070829\\n\",\n      \"Root Mean Squared Error (RMSE): 1.2300426785566543\\n\",\n      \"Mean Absolute Error (MAE): 0.6879266691669605\\n\",\n      \"\\n\",\n      \"\\n\",\n      \"float lower_relative_eta_bound;\\n\",\n      \"float lower_correction;\\n\",\n      \"if (relative_eta > 1.01f && relative_eta <= 1.02f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.01f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.4f, 2.45f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.45f, 2.4665f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 
0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.4665f, 2.52f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.52f, 2.55f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = 2.55f;\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.55f, 2.585f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.585f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.02f && relative_eta <= 1.03f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.02f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.51f, 2.54f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.54f, 2.565f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.565f, 2.57f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 
0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.57f, 2.59f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.59f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.03f && relative_eta <= 1.1f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.03f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.51f, 2.544f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.544f, 2.565f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.565f, 2.58f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.58f, 2.6f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.1f && relative_eta <= 1.2f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 
1.1f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.54f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.54f, 2.575f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.575f, 2.61f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.61f, 2.63f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.63f, 2.6f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.2f && relative_eta <= 1.4f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.2f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n 
     \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.55f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.55f, 2.65f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.65f, 2.675f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.675f, 2.7f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.7f, 2.675f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.675f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.4f && relative_eta <= 1.5f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.4f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      
\"\\t\\tlower_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.7f, 2.875f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.875f, 2.925f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.925f, 2.95f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.95f, 2.8f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.8f, 2.55f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 1.5f && relative_eta <= 2.0f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 1.5f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 1.6f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.6f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.7f, 2.95f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.95f, 3.1f, (roughness - 
0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = 3.1f;\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.1f, 3.05f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.05f, 2.57f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 2.0f && relative_eta <= 2.4f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 2.0f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.5f, 2.2f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.2f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.75f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.75f, 3.5f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.5f, 4.85f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(4.85f, 6.0f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(6.0f, 7.0f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(7.0f, 
2.57f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 2.4f && relative_eta <= 3.0f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 2.4f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.5f, 2.0f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.0f, 2.44f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.44f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 3.0f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.0f, 3.8f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.8f, 7.0f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(7.0f, 10.0f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(10.0f, 12.0f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(12.0f, 3.9f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta > 3.0f)\\n\",\n      \"{\\n\",\n      \"\\tlower_relative_eta_bound = 3.0f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\tlower_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\tlower_correction 
= hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.5f, 1.7f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(1.7f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.475f, 2.9f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(2.9f, 3.8f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(3.8f, 7.5f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(7.5f, 12.0f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(12.0f, 13.75f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\tlower_correction = hippt::lerp(13.75f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"float higher_relative_eta_bound;\\n\",\n      \"float higher_correction;\\n\",\n      \"if (relative_eta <= 1.01f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.01f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 
0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.4f, 2.45f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.45f, 2.4665f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.4665f, 2.52f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.52f, 2.55f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = 2.55f;\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.55f, 2.585f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.585f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.02f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.02f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.51f, 2.54f, (roughness - 0.5f) / 0.1f);\\n\",\n      
\"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.54f, 2.565f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.565f, 2.57f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.57f, 2.59f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.59f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.03f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.03f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.51f, 2.544f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.544f, 2.565f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.565f, 2.58f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.58f, 2.6f, (roughness - 
0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.1f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.1f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.54f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.54f, 2.575f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.575f, 2.61f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.61f, 2.63f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.63f, 2.6f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.2f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.2f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      
\"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.55f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.55f, 2.65f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.65f, 2.675f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.675f, 2.7f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.7f, 2.675f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.675f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.4f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.4f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = 
hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.7f, 2.875f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.875f, 2.925f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.925f, 2.95f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.95f, 2.8f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.8f, 2.55f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 1.5f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 1.5f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.6f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.6f, 2.3f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\\n\",\n      
\"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.7f, 2.95f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.95f, 3.1f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = 3.1f;\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.1f, 3.05f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.05f, 2.57f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 2.0f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 2.0f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.5f, 2.2f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.2f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.75f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.75f, 3.5f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.5f, 4.85f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(4.85f, 6.0f, (roughness - 0.7f) / 
0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(6.0f, 7.0f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(7.0f, 2.57f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 2.4f)\\n\",\n      \"{\\n\",\n      \"\\thigher_relative_eta_bound = 2.4f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.5f, 2.0f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.0f, 2.44f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.44f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 3.0f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.0f, 3.8f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.8f, 7.0f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(7.0f, 10.0f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(10.0f, 12.0f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(12.0f, 3.9f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"else if (relative_eta <= 3.0f)\\n\",\n      
\"{\\n\",\n      \"\\thigher_relative_eta_bound = 3.0f;\\n\",\n      \"\\n\",\n      \"\\tif (roughness <= 0.0f)\\n\",\n      \"\\t\\thigher_correction = 2.5f;\\n\",\n      \"\\telse if (roughness <= 0.1f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.2f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.5f, 1.7f, (roughness - 0.1f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.3f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(1.7f, 2.38f, (roughness - 0.2f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.4f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.5f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.475f, 2.9f, (roughness - 0.4f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.6f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(2.9f, 3.8f, (roughness - 0.5f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.7f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(3.8f, 7.5f, (roughness - 0.6f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.8f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(7.5f, 12.0f, (roughness - 0.7f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 0.9f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(12.0f, 13.75f, (roughness - 0.8f) / 0.1f);\\n\",\n      \"\\telse if (roughness <= 1.0f)\\n\",\n      \"\\t\\thigher_correction = hippt::lerp(13.75f, 2.5f, (roughness - 0.9f) / 0.1f);\\n\",\n      \"}\\n\",\n      \"\\n\",\n      \"return hippt::lerp(lower_correction, higher_correction, (relative_eta - lower_relative_eta_bound) / (higher_relative_eta_bound - lower_relative_eta_bound));\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"import re\\n\",\n    \"from sklearn.preprocessing import PolynomialFeatures\\n\",\n    \"from sklearn.linear_model import LinearRegression\\n\",\n   
 \"\\n\",\n    \"DEGREE = 3\\n\",\n    \"\\n\",\n    \"# Notebook for fitting a polynomial for the correction exponent for glass dielectrics \\n\",\n    \"# given a certain IOR and roughness\\n\",\n    \"#\\n\",\n    \"# The 'values'  array has been eyeballed manually to try and minimize the visual energy loss/gain\\n\",\n    \"# The idea of the polynomial is to replace the massive if(),  else if(), else if() that would have been\\n\",\n    \"# needed otherwise\\n\",\n    \"#\\n\",\n    \"# TURNS OUT THAT THE FITTING ERROR IS TOO LARGE AND THE POLYNOMIAL\\n\",\n    \"# FITTED BY THIS SCRIPT IS THUS NEVER USED IN THE RENDERER\\n\",\n    \"# \\n\",\n    \"# Instead, we just translate the double entry table IO-Roughness into a massive and disgusting\\n\",\n    \"# if(), else if(). This is exactly what we wanted to avoid but in the end, it's efficient and fits well.\\n\",\n    \"# The only single downside is that it looks disgusting but who cares?\\n\",\n    \"\\n\",\n    \"ior_values = [1.01, 1.02, 1.03, 1.1, 1.2, 1.4, 1.5, 2.0, 2.4, 3.0]\\n\",\n    \"roughness_values = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]\\n\",\n    \"\\n\",\n    \"# Table of output values (for each combination of IOR and roughness)\\n\",\n    \"# For example, these could represent reflectance values:\\n\",\n    \"values = np.array([\\n\",\n    \"    [2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5],  # Roughness 0.0\\n\",\n    \"    [2.5, 2.5, 2.5, 2.5, 1.8, 1.8, 1.6, 1.5, 1.5, 1.5],  # Roughness 0.1\\n\",\n    \"    [2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.3, 2.2, 2, 1.7],  # Roughness 0.2\\n\",\n    \"    [2.4, 2.4, 2.4, 2.38, 2.38, 2.38, 2.38, 2.38, 2.44, 2.38],  # Roughness 0.3\\n\",\n    \"    [2.45, 2.475, 2.475, 2.475, 2.475, 2.475, 2.475, 2.475, 2.475, 2.475],  # Roughness 0.4\\n\",\n    \"    [2.4665, 2.51, 2.51, 2.54, 2.55, 2.7, 2.7, 2.75, 3, 2.9],  # Roughness 0.5\\n\",\n    \"    [2.52, 2.54, 2.544, 2.575, 2.65, 2.875, 2.95, 3.5, 3.8, 3.8],  # Roughness 0.6\\n\",\n    \"    
[2.55, 2.565, 2.565, 2.61, 2.675, 2.925, 3.1, 4.85, 7, 7.5],  # Roughness 0.7\\n\",\n    \"    [2.55, 2.57, 2.58, 2.63, 2.7, 2.95, 3.1, 6, 10, 12],  # Roughness 0.8\\n\",\n    \"    [2.585, 2.59, 2.6, 2.6, 2.675, 2.8, 3.05, 7, 12, 13.75],  # Roughness 0.9\\n\",\n    \"    [2.5, 2.5, 2.5, 2.5, 2.5, 2.55, 2.57, 2.57, 3.9, 2.5],  # Roughness 1.0\\n\",\n    \"])\\n\",\n    \"\\n\",\n    \"# Create combinations of IOR and roughness\\n\",\n    \"ior, roughness = np.meshgrid(ior_values, roughness_values)  # Create grid\\n\",\n    \"ior = ior.ravel()  # Flatten to 1D array\\n\",\n    \"roughness = roughness.ravel()  # Flatten to 1D array\\n\",\n    \"outputs = values.ravel()  # Flatten table of outputs\\n\",\n    \"\\n\",\n    \"# Prepare design matrix for polynomial features\\n\",\n    \"X = np.column_stack((ior, roughness))\\n\",\n    \"poly = PolynomialFeatures(degree=DEGREE, include_bias=False)  # Bias=False avoids adding constant\\n\",\n    \"X_poly = poly.fit_transform(X)\\n\",\n    \"\\n\",\n    \"# Fit the polynomial model\\n\",\n    \"model = LinearRegression()\\n\",\n    \"model.fit(X_poly, outputs)\\n\",\n    \"\\n\",\n    \"# Extract coefficients\\n\",\n    \"intercept = model.intercept_  # Constant term\\n\",\n    \"coefficients = model.coef_  # Remaining terms\\n\",\n    \"\\n\",\n    \"# Display results\\n\",\n    \"# print(\\\"Intercept (constant term):\\\", intercept)\\n\",\n    \"# print(\\\"Coefficients (for polynomial terms):\\\", coefficients)\\n\",\n    \"\\n\",\n    \"# Polynomial interpretation\\n\",\n    \"terms = poly.get_feature_names_out([\\\"relative_eta\\\", \\\"roughness\\\"])\\n\",\n    \"for term, coef in zip(terms, coefficients):\\n\",\n    \"    # Replace '^2' with ' * IOR' or ' * Roughness' for terms like IOR^2\\n\",\n    \"    term = re.sub(r\\\"(relative_eta|roughness)\\\\^([0-9]+)\\\", lambda m: \\\"*\\\".join([m.group(1)] * int(m.group(2))), term)\\n\",\n    \"\\n\",\n    \"    # If the term has more than one factor, join them with ' 
* '\\n\",\n    \"    formatted_term = \\\" * \\\".join(term.split(\\\" \\\"))\\n\",\n    \"    \\n\",\n    \"    print(f\\\"{formatted_term} * {coef}\\\", end='+')\\n\",\n    \"\\n\",\n    \"from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score\\n\",\n    \"\\n\",\n    \"# Predict the values using the fitted model\\n\",\n    \"predicted_outputs = model.predict(X_poly)\\n\",\n    \"\\n\",\n    \"# Calculate error metrics\\n\",\n    \"mse = mean_squared_error(outputs, predicted_outputs)\\n\",\n    \"rmse = np.sqrt(mse)\\n\",\n    \"mae = mean_absolute_error(outputs, predicted_outputs)\\n\",\n    \"\\n\",\n    \"# Display the errors\\n\",\n    \"print(\\\"\\\\n\\\\nFitting Errors:\\\")\\n\",\n    \"print(f\\\"Mean Squared Error (MSE): {mse}\\\")\\n\",\n    \"print(f\\\"Root Mean Squared Error (RMSE): {rmse}\\\")\\n\",\n    \"print(f\\\"Mean Absolute Error (MAE): {mae}\\\")\\n\",\n    \"\\n\",\n    \"#######\\n\",\n    \"# Printing the massive if(), else if() block so that it is ready to be copy pasted in the shader\\n\",\n    \"#######\\n\",\n    \"\\n\",\n    \"print()\\n\",\n    \"print()\\n\",\n    \"\\n\",\n    \"print(\\\"float lower_relative_eta_bound;\\\")\\n\",\n    \"print(\\\"float lower_correction;\\\")\\n\",\n    \"for i in range(0, len(ior_values)):\\n\",\n    \"    relative_eta = ior_values[i]\\n\",\n    \"    next_relative_eta = 1\\n\",\n    \"    if (i != len(ior_values) - 1):\\n\",\n    \"        next_relative_eta = ior_values[i + 1]\\n\",\n    \"\\n\",\n    \"    if (i == 0):\\n\",\n    \"        print(\\\"if (\\\", end='');\\n\",\n    \"    else:\\n\",\n    \"        print(\\\"else if (\\\", end='');\\n\",\n    \"    if (i != len(ior_values) - 1):\\n\",\n    \"        print(\\\"relative_eta > \\\" + str(relative_eta) + \\\"f && relative_eta <= \\\" + str(next_relative_eta) + \\\"f)\\\\n{\\\")\\n\",\n    \"    else:\\n\",\n    \"        print(\\\"relative_eta > \\\" + str(relative_eta) + \\\"f)\\\\n{\\\")\\n\",\n    \"    
\\n\",\n    \"    print(\\\"\\\\tlower_relative_eta_bound = \\\" + str(relative_eta) + \\\"f;\\\\n\\\")\\n\",\n    \"    for j in range(0, len(roughness_values)):\\n\",\n    \"        roughness = roughness_values[j]\\n\",\n    \"        str_roughness = \\\"\\\"\\n\",\n    \"        str_roughness_minus_1 = \\\"\\\"\\n\",\n    \"        if (j == 0):\\n\",\n    \"            str_roughness = \\\"0.0f\\\"\\n\",\n    \"        elif (j == len(roughness_values) - 1):\\n\",\n    \"            str_roughness = \\\"1.0f\\\"\\n\",\n    \"            str_roughness_minus_1 = str(round(roughness - 1 / (len(roughness_values) - 1), 1)) + \\\"f\\\"\\n\",\n    \"        else:\\n\",\n    \"            str_roughness = str(roughness) + \\\"f\\\"\\n\",\n    \"            str_roughness_minus_1 = str(round(roughness - 1 / (len(roughness_values) - 1), 1)) + \\\"f\\\"\\n\",\n    \"\\n\",\n    \"        print(\\\"\\\\t\\\", end='')\\n\",\n    \"        if (j == 0):\\n\",\n    \"            print(\\\"if (\\\", end='')\\n\",\n    \"        else:\\n\",\n    \"            print(\\\"else if (\\\", end='')\\n\",\n    \"\\n\",\n    \"        print(\\\"roughness <= \\\" + str_roughness + \\\")\\\")\\n\",\n    \"\\n\",\n    \"        if (j == 0):\\n\",\n    \"            print(\\\"\\\\t\\\\tlower_correction = \\\" + str(values[j][i]) +  \\\"f;\\\")\\n\",\n    \"        else:\\n\",\n    \"            lower_lerp = values[j - 1][i]\\n\",\n    \"            higher_lerp = values[j][i]\\n\",\n    \"            if (lower_lerp == higher_lerp):\\n\",\n    \"                print(\\\"\\\\t\\\\tlower_correction = \\\" + str(lower_lerp) + \\\"f;\\\")\\n\",\n    \"            else:\\n\",\n    \"                print(\\\"\\\\t\\\\tlower_correction = hippt::lerp(\\\" + str(lower_lerp) + \\\"f, \\\" + str(higher_lerp) + \\\"f, (roughness - \\\" + str_roughness_minus_1 + \\\") / 0.1f);\\\")\\n\",\n    \"\\n\",\n    \"    print(\\\"}\\\")\\n\",\n    \"    \\n\",\n    \"print(\\\"float 
higher_relative_eta_bound;\\\")\\n\",\n    \"print(\\\"float higher_correction;\\\")\\n\",\n    \"for i in range(0, len(ior_values)):\\n\",\n    \"    relative_eta = ior_values[i]\\n\",\n    \"\\n\",\n    \"    if (i == 0):\\n\",\n    \"        print(\\\"if (\\\", end='');\\n\",\n    \"    else:\\n\",\n    \"        print(\\\"else if (\\\", end='');\\n\",\n    \"    print(\\\"relative_eta <= \\\" + str(relative_eta) + \\\"f)\\\\n{\\\")\\n\",\n    \"\\n\",\n    \"    print(\\\"\\\\thigher_relative_eta_bound = \\\" + str(relative_eta) + \\\"f;\\\\n\\\")\\n\",\n    \"    for j in range(0, len(roughness_values)):\\n\",\n    \"        roughness = roughness_values[j]\\n\",\n    \"        str_roughness = \\\"\\\"\\n\",\n    \"        str_roughness_minus_1 = \\\"\\\"\\n\",\n    \"        if (j == 0):\\n\",\n    \"            str_roughness = \\\"0.0f\\\"\\n\",\n    \"        elif (j == len(roughness_values) - 1):\\n\",\n    \"            str_roughness = \\\"1.0f\\\"\\n\",\n    \"            str_roughness_minus_1 = str(round(roughness - 1 / (len(roughness_values) - 1), 1)) + \\\"f\\\"\\n\",\n    \"        else:\\n\",\n    \"            str_roughness = str(roughness) + \\\"f\\\"\\n\",\n    \"            str_roughness_minus_1 = str(round(roughness - 1 / (len(roughness_values) - 1), 1)) + \\\"f\\\"\\n\",\n    \"\\n\",\n    \"        print(\\\"\\\\t\\\", end='')\\n\",\n    \"        if (j == 0):\\n\",\n    \"            print(\\\"if (\\\", end='')\\n\",\n    \"        else:\\n\",\n    \"            print(\\\"else if (\\\", end='')\\n\",\n    \"\\n\",\n    \"        print(\\\"roughness <= \\\" + str_roughness + \\\")\\\")\\n\",\n    \"\\n\",\n    \"        if (j == 0):\\n\",\n    \"            print(\\\"\\\\t\\\\thigher_correction = \\\" + str(values[j][i]) +  \\\"f;\\\")\\n\",\n    \"        else:\\n\",\n    \"            lower_lerp = values[j - 1][i]\\n\",\n    \"            higher_lerp = values[j][i]\\n\",\n    \"            if (lower_lerp == higher_lerp):\\n\",\n    \"        
        print(\\\"\\\\t\\\\thigher_correction = \\\" + str(lower_lerp) + \\\"f;\\\")\\n\",\n    \"            else:\\n\",\n    \"                print(\\\"\\\\t\\\\thigher_correction = hippt::lerp(\\\" + str(lower_lerp) + \\\"f, \\\" + str(higher_lerp) + \\\"f, (roughness - \\\" + str_roughness_minus_1 + \\\") / 0.1f);\\\")\\n\",\n    \"\\n\",\n    \"    print(\\\"}\\\")\\n\",\n    \"\\n\",\n    \"print(\\\"\\\\nreturn hippt::lerp(lower_correction, higher_correction, (relative_eta - lower_relative_eta_bound) / (higher_relative_eta_bound - lower_relative_eta_bound));\\\")\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.11.5\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "data/GLTFs/cornell_pbr.gltf",
    "content": "{\n\t\"asset\":{\n\t\t\"generator\":\"Khronos glTF Blender I/O v4.2.70\",\n\t\t\"version\":\"2.0\"\n\t},\n\t\"extensionsUsed\":[\n\t\t\"KHR_materials_clearcoat\",\n\t\t\"KHR_materials_transmission\",\n\t\t\"KHR_materials_emissive_strength\",\n\t\t\"KHR_materials_specular\",\n\t\t\"KHR_materials_ior\"\n\t],\n\t\"scene\":0,\n\t\"scenes\":[\n\t\t{\n\t\t\t\"name\":\"Scene\",\n\t\t\t\"nodes\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2\n\t\t\t]\n\t\t}\n\t],\n\t\"nodes\":[\n\t\t{\n\t\t\t\"mesh\":0,\n\t\t\t\"name\":\"cornell.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"camera\":0,\n\t\t\t\"name\":\"Camera.001\",\n\t\t\t\"translation\":[\n\t\t\t\t-0.10000000149011612,\n\t\t\t\t1,\n\t\t\t\t6.499999523162842\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":1,\n\t\t\t\"name\":\"Sphere\",\n\t\t\t\"scale\":[\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3562004268169403,\n\t\t\t\t0.800000011920929,\n\t\t\t\t0.47223222255706787\n\t\t\t]\n\t\t}\n\t],\n\t\"cameras\":[\n\t\t{\n\t\t\t\"name\":\"Camera.001\",\n\t\t\t\"perspective\":{\n\t\t\t\t\"aspectRatio\":1.7777777777777777,\n\t\t\t\t\"yfov\":0.4038851857185364,\n\t\t\t\t\"zfar\":100,\n\t\t\t\t\"znear\":0.10000000149011612\n\t\t\t},\n\t\t\t\"type\":\"perspective\"\n\t\t}\n\t],\n\t\"materials\":[\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_specular\":{\n\t\t\t\t\t\"specularFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"shortBox.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.09947466850280762,\n\t\t\t\t\t0.09947466850280762,\n\t\t\t\t\t0.09947466850280762,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensio
ns\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"leftWall.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t1,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":100\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"light.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.9009850025177002\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"tallBox.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t1,\n\t\t\t\t\t1,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.6837720274925232\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"floor.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.8418859839439392\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"backWall.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.9009850025177002\
n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ceiling.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t1,\n\t\t\t\t\t1,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.9009850025177002\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"rightWall.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t1,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1.4500000476837158\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.8000000715255737,\n\t\t\t\t\t0,\n\t\t\t\t\t0.004255097359418869,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t}\n\t],\n\t\"meshes\":[\n\t\t{\n\t\t\t\"name\":\"cornell.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":0,\n\t\t\t\t\t\t\"NORMAL\":1\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":2,\n\t\t\t\t\t\"material\":0\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":3,\n\t\t\t\t\t\t\"NORMAL\":4\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":1\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":6,\n\t\t\t\t\t\t\"NORMAL\":7\n\t\t\t\t\t
},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":2\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":8,\n\t\t\t\t\t\t\"NORMAL\":9\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":2,\n\t\t\t\t\t\"material\":3\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":10,\n\t\t\t\t\t\t\"NORMAL\":11\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":4\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":12,\n\t\t\t\t\t\t\"NORMAL\":13\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":5\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":14,\n\t\t\t\t\t\t\"NORMAL\":15\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":6\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":16,\n\t\t\t\t\t\t\"NORMAL\":17\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":5,\n\t\t\t\t\t\"material\":7\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":18,\n\t\t\t\t\t\t\"NORMAL\":19,\n\t\t\t\t\t\t\"TEXCOORD_0\":20\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":21,\n\t\t\t\t\t\"material\":8\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t],\n\t\"accessors\":[\n\t\t{\n\t\t\t\"bufferView\":0,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":20,\n\t\t\t\"max\":[\n\t\t\t\t0.699999988079071,\n\t\t\t\t0.75,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.05000000074505806,\n\t\t\t\t0,\n\t\t\t\t-0.6000000238418579\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":1,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":20,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":2,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":30,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":3,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t-0.9900000095367432,\n\t\t\t\t0.9900000095367432,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0
199999809265137,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t-1.9900000095367432\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":4,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":5,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":6,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.23000000417232513,\n\t\t\t\t0.1599999964237213,\n\t\t\t\t-1.9800000190734863\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.23999999463558197,\n\t\t\t\t-0.2199999988079071,\n\t\t\t\t-1.9800000190734863\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":7,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":8,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":20,\n\t\t\t\"max\":[\n\t\t\t\t0.03999999910593033,\n\t\t\t\t0.09000000357627869,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.7099999785423279,\n\t\t\t\t-0.6700000166893005,\n\t\t\t\t-1.2000000476837158\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":9,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":20,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":10,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0.9900000095367432,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0099999904632568,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":11,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":12,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0199999809265137,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t-1.9900000095367432\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\
t\t\"bufferView\":13,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":14,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0.9900000095367432,\n\t\t\t\t-1.9900000095367432\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0199999809265137,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t-1.9900000095367432\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":15,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":16,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0.9900000095367432,\n\t\t\t\t0\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t1,\n\t\t\t\t-1.0399999618530273,\n\t\t\t\t-1.9900000095367432\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":17,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":18,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4223,\n\t\t\t\"max\":[\n\t\t\t\t0.9999998807907104,\n\t\t\t\t1,\n\t\t\t\t0.9999997615814209\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999997019767761,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":19,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4223,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":20,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4223,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":21,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":24192,\n\t\t\t\"type\":\"SCALAR\"\n\t\t}\n\t],\n\t\"bufferViews\":[\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":240,\n\t\t\t\"byteOffset\":0,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":240,\n\t\t\t\"byteOffset\":240,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":60,\n\t\t\t\"byteOffset\":480,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t
\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":588,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byteOffset\":636,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":696,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":240,\n\t\t\t\"byteOffset\":744,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":240,\n\t\t\t\"byteOffset\":984,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1224,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1272,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1320,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1368,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1416,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1464,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1512,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1560,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":50676,\n\t\t\t\"byteOffset\":1608,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":50676,\n\t\t\t\"byteOffset\":52284,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":33784,\n\t\t\t\"byte
Offset\":102960,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48384,\n\t\t\t\"byteOffset\":136744,\n\t\t\t\"target\":34963\n\t\t}\n\t],\n\t\"buffers\":[\n\t\t{\n\t\t\t\"byteLength\":185128,\n\t\t\t\"uri\":\"cornell_pbr.bin\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "data/GLTFs/multi-dispersion.gltf",
    "content": "{\n\t\"asset\":{\n\t\t\"generator\":\"Khronos glTF Blender I/O v4.2.70\",\n\t\t\"version\":\"2.0\"\n\t},\n\t\"extensionsUsed\":[\n\t\t\"KHR_materials_transmission\"\n\t],\n\t\"scene\":0,\n\t\"scenes\":[\n\t\t{\n\t\t\t\"name\":\"Scene\",\n\t\t\t\"nodes\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t3,\n\t\t\t\t4,\n\t\t\t\t5,\n\t\t\t\t6,\n\t\t\t\t7,\n\t\t\t\t8,\n\t\t\t\t9,\n\t\t\t\t10,\n\t\t\t\t11,\n\t\t\t\t12,\n\t\t\t\t13,\n\t\t\t\t14,\n\t\t\t\t15,\n\t\t\t\t16,\n\t\t\t\t17,\n\t\t\t\t18,\n\t\t\t\t19,\n\t\t\t\t20,\n\t\t\t\t21,\n\t\t\t\t22,\n\t\t\t\t23,\n\t\t\t\t24,\n\t\t\t\t25,\n\t\t\t\t26,\n\t\t\t\t27,\n\t\t\t\t28,\n\t\t\t\t29,\n\t\t\t\t30,\n\t\t\t\t31,\n\t\t\t\t32,\n\t\t\t\t33,\n\t\t\t\t34,\n\t\t\t\t35,\n\t\t\t\t36,\n\t\t\t\t37,\n\t\t\t\t38,\n\t\t\t\t39,\n\t\t\t\t40,\n\t\t\t\t41,\n\t\t\t\t42,\n\t\t\t\t43,\n\t\t\t\t44,\n\t\t\t\t45,\n\t\t\t\t46,\n\t\t\t\t47,\n\t\t\t\t48,\n\t\t\t\t49,\n\t\t\t\t50,\n\t\t\t\t51\n\t\t\t]\n\t\t}\n\t],\n\t\"nodes\":[\n\t\t{\n\t\t\t\"mesh\":0,\n\t\t\t\"name\":\"Big\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.18059857189655304,\n\t\t\t\t0.04401765391230583,\n\t\t\t\t0.2745361924171448,\n\t\t\t\t0.9434386491775513\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.12121788412332535,\n\t\t\t\t0.08730625361204147,\n\t\t\t\t-0.17660263180732727\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"camera\":0,\n\t\t\t\"name\":\"Camera\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.022813020274043083,\n\t\t\t\t0.9744870066642761,\n\t\t\t\t0.11683055013418198,\n\t\t\t\t0.19027690589427948\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2137400358915329,\n\t\t\t\t0.26666662096977234,\n\t\t\t\t-0.9217798113822937\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":1,\n\t\t\t\"name\":\"Plane\",\n\t\t\t\"scale\":[\n\t\t\t\t68.26968383789062,\n\t\t\t\t68.26968383789062,\n\t\t\t\t68.26968383789062\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.03350231796503067,\n\t\t\t\t0,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":2,\n\t\t\t\"name\":\"GeoSphere003.001\",\n\t\t\t\"rotation\":[\n\t\t\t
\t-0.2717084586620331,\n\t\t\t\t0.8061394691467285,\n\t\t\t\t-0.16540518403053284,\n\t\t\t\t0.4989537298679352\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.20849435031414032,\n\t\t\t\t0.011372121050953865,\n\t\t\t\t-0.33038368821144104\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":3,\n\t\t\t\"name\":\"GeoSphere003.002\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.505194365978241,\n\t\t\t\t0.028006816282868385,\n\t\t\t\t0.8624654412269592,\n\t\t\t\t0.012149344198405743\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.5041190385818481,\n\t\t\t\t0.007328478619456291,\n\t\t\t\t-0.3771926760673523\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":4,\n\t\t\t\"name\":\"GeoSphere003.003\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.18523885309696198,\n\t\t\t\t-0.8525427579879761,\n\t\t\t\t0.2537952959537506,\n\t\t\t\t0.41766682267189026\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.17694644629955292,\n\t\t\t\t0.011370167136192322,\n\t\t\t\t-0.3251064717769623\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":5,\n\t\t\t\"name\":\"GeoSphere003.004\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.26327353715896606,\n\t\t\t\t-0.9462590217590332,\n\t\t\t\t-0.1835026741027832,\n\t\t\t\t0.04009705409407616\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.11534974724054337,\n\t\t\t\t0.011368044652044773,\n\t\t\t\t-0.30308225750923157\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":6,\n\t\t\t\"name\":\"GeoSphere003.005\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.2972252368927002,\n\t\t\t\t-0.18090642988681793,\n\t\t\t\t-0.11166474223136902,\n\t\t\t\t0.9308388829231262\n\t\t\t],\n\t\t\t\"scal
e\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.02503671869635582,\n\t\t\t\t0.011372373439371586,\n\t\t\t\t-0.08266749233007431\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":7,\n\t\t\t\"name\":\"GeoSphere003.006\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.24270711839199066,\n\t\t\t\t0.18106096982955933,\n\t\t\t\t0.20432241261005402,\n\t\t\t\t0.9308934211730957\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05786854028701782,\n\t\t\t\t0.011371721513569355,\n\t\t\t\t0.03448312357068062\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":8,\n\t\t\t\"name\":\"GeoSphere003.007\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.30991220474243164,\n\t\t\t\t-0.9454272389411926,\n\t\t\t\t-0.04572071135044098,\n\t\t\t\t0.08961784839630127\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006503880023956\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.10885285586118698,\n\t\t\t\t0.011380909010767937,\n\t\t\t\t-0.022299697622656822\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":9,\n\t\t\t\"name\":\"GeoSphere003.008\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.15861886739730835,\n\t\t\t\t0.3616071045398712,\n\t\t\t\t-0.2718893587589264,\n\t\t\t\t0.8775856494903564\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.11128944158554077,\n\t\t\t\t0.011369084939360619,\n\t\t\t\t-0.06842060387134552\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":10,\n\t\t\t\"name\":\"GeoSphere003.009\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.2916436493396759,\n\t\t\t\t-0.19744791090488434,\n\t\t\t\t-0.13127796351909637,\n\t\t\t\t0.9266738295555115\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\
t\t\t\t-0.022038046270608902,\n\t\t\t\t0.01136020291596651,\n\t\t\t\t-0.10843902826309204\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":11,\n\t\t\t\"name\":\"GeoSphere003.010\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.1330464482307434,\n\t\t\t\t0.239720419049263,\n\t\t\t\t-0.28655847907066345,\n\t\t\t\t0.9179962277412415\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.39776915311813354,\n\t\t\t\t0.011412850581109524,\n\t\t\t\t0.36004236340522766\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":12,\n\t\t\t\"name\":\"GeoSphere003.011\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.25378233194351196,\n\t\t\t\t-0.717930257320404,\n\t\t\t\t0.1858185976743698,\n\t\t\t\t0.6210009455680847\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.23853854835033417,\n\t\t\t\t0.01137317530810833,\n\t\t\t\t-0.36935099959373474\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":13,\n\t\t\t\"name\":\"GeoSphere003.012\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.8004741072654724,\n\t\t\t\t-0.02516813389956951,\n\t\t\t\t0.598798930644989,\n\t\t\t\t0.006902455817908049\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.3570106029510498,\n\t\t\t\t0.007246554829180241,\n\t\t\t\t-0.3618093430995941\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":14,\n\t\t\t\"name\":\"GeoSphere003.013\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.9252974390983582,\n\t\t\t\t0.03435178101062775,\n\t\t\t\t-0.37766754627227783,\n\t\t\t\t0.0034516039304435253\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.07533064484596252,\n\t\t\t\t0.00737784942612052,\n\t\t\t\t-0.3752342760562897\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":15,\n\t
\t\t\"name\":\"GeoSphere003.014\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.018853910267353058,\n\t\t\t\t0.024486443027853966,\n\t\t\t\t0.9993785619735718,\n\t\t\t\t0.016955794766545296\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006503880023956,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.1543421447277069,\n\t\t\t\t0.007303744088858366,\n\t\t\t\t0.10264983028173447\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":16,\n\t\t\t\"name\":\"GeoSphere003.015\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.05616613104939461,\n\t\t\t\t-0.941322386264801,\n\t\t\t\t-0.3144487142562866,\n\t\t\t\t0.10899325460195541\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.10102517902851105,\n\t\t\t\t0.011370140127837658,\n\t\t\t\t0.052674245089292526\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":17,\n\t\t\t\"name\":\"GeoSphere003.016\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4871572256088257,\n\t\t\t\t-0.018992548808455467,\n\t\t\t\t-0.8731009364128113,\n\t\t\t\t0.003448193660005927\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.296036958694458,\n\t\t\t\t0.007107077166438103,\n\t\t\t\t-0.2748781442642212\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":18,\n\t\t\t\"name\":\"GeoSphere003.017\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5666192173957825,\n\t\t\t\t0.014482385478913784,\n\t\t\t\t-0.8233940005302429,\n\t\t\t\t0.027481136843562126\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.38634154200553894,\n\t\t\t\t0.007334591820836067,\n\t\t\t\t-0.3463435471057892\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":19,\n\t\t\t\"name\":\"GeoSphere003.018\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.2868089973926544,\n\t\t\t\t-0.02325175516307354,\n\t\t\t\t-0.1
2863944470882416,\n\t\t\t\t0.9490268230438232\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.3769674003124237,\n\t\t\t\t0.011365294456481934,\n\t\t\t\t-0.31926217675209045\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":20,\n\t\t\t\"name\":\"GeoSphere003.019\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.2635954022407532,\n\t\t\t\t-0.9462764859199524,\n\t\t\t\t0.1861024647951126,\n\t\t\t\t0.021076705306768417\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.17578920722007751,\n\t\t\t\t0.011364184319972992,\n\t\t\t\t0.1279369443655014\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":21,\n\t\t\t\"name\":\"GeoSphere003.020\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.981778621673584,\n\t\t\t\t0.028752772137522697,\n\t\t\t\t-0.18784041702747345,\n\t\t\t\t0.00028548462432809174\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.052354007959365845,\n\t\t\t\t0.0072728716768324375,\n\t\t\t\t0.06500548124313354\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":22,\n\t\t\t\"name\":\"GeoSphere003.021\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.6077518463134766,\n\t\t\t\t0.007505889516323805,\n\t\t\t\t-0.7933579683303833,\n\t\t\t\t0.03412551432847977\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.020264793187379837,\n\t\t\t\t0.007347577717155218,\n\t\t\t\t0.05279305577278137\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":23,\n\t\t\t\"name\":\"GeoSphere003.022\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.29746347665786743,\n\t\t\t\t0.4036971926689148,\n\t\t\t\t0.11747676879167557,\n\t\t\t\t0.8571717143058777\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.130065023899
07837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.011225644499063492,\n\t\t\t\t0.011366810649633408,\n\t\t\t\t-0.052403487265110016\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":24,\n\t\t\t\"name\":\"GeoSphere003.023\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.15131594240665436,\n\t\t\t\t0.3455721139907837,\n\t\t\t\t-0.27116018533706665,\n\t\t\t\t0.8855257034301758\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.020422939211130142,\n\t\t\t\t0.011381527408957481,\n\t\t\t\t-0.0693168044090271\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":25,\n\t\t\t\"name\":\"GeoSphere003.024\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.5399572253227234,\n\t\t\t\t-0.005164918024092913,\n\t\t\t\t-0.8411224484443665,\n\t\t\t\t0.03053930588066578\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.07693775743246078,\n\t\t\t\t0.007273177616298199,\n\t\t\t\t-0.45747384428977966\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":26,\n\t\t\t\"name\":\"GeoSphere003.025\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.2752854824066162,\n\t\t\t\t0.8250166177749634,\n\t\t\t\t-0.14071622490882874,\n\t\t\t\t0.4730375111103058\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.09479845315217972,\n\t\t\t\t0.011392923071980476,\n\t\t\t\t-0.34356972575187683\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":27,\n\t\t\t\"name\":\"GeoSphere003.026\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.21575218439102173,\n\t\t\t\t0.4657882750034332,\n\t\t\t\t-0.2441786229610443,\n\t\t\t\t0.8227207064628601\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.07972949743270874,\n\t\t\t\t0.011360841803252
697,\n\t\t\t\t-0.29636630415916443\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":28,\n\t\t\t\"name\":\"GeoSphere003.027\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6783722639083862,\n\t\t\t\t0.015337063930928707,\n\t\t\t\t0.7341387867927551,\n\t\t\t\t0.02482239529490471\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.16386955976486206,\n\t\t\t\t0.007287341635674238,\n\t\t\t\t-0.35593411326408386\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":29,\n\t\t\t\"name\":\"GeoSphere003.028\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6260389089584351,\n\t\t\t\t0.027038700878620148,\n\t\t\t\t0.7792672514915466,\n\t\t\t\t0.009315108880400658\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05065222829580307,\n\t\t\t\t0.007316164206713438,\n\t\t\t\t-0.39160624146461487\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":30,\n\t\t\t\"name\":\"GeoSphere003.029\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.29577139019966125,\n\t\t\t\t0.9138772487640381,\n\t\t\t\t-0.1264592409133911,\n\t\t\t\t0.24770089983940125\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.026655618101358414,\n\t\t\t\t0.011398173868656158,\n\t\t\t\t0.6290746927261353\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":31,\n\t\t\t\"name\":\"GeoSphere003.030\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.2367924451828003,\n\t\t\t\t-0.6930391192436218,\n\t\t\t\t-0.2132471799850464,\n\t\t\t\t0.6466465592384338\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.2075895071029663,\n\t\t\t\t0.011371053755283356,\n\t\t\t\t0.12243460863828659\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":32,\n\t\t\t\"name\":\"GeoSphere003.031\",\n\t\t\t\"rotation\
":[\n\t\t\t\t0.2906414866447449,\n\t\t\t\t-0.5638951063156128,\n\t\t\t\t0.06424777954816818,\n\t\t\t\t0.7703388929367065\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.46048444509506226,\n\t\t\t\t0.011488606221973896,\n\t\t\t\t-0.1842009425163269\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":33,\n\t\t\t\"name\":\"GeoSphere003.032\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.11308038979768753,\n\t\t\t\t0.023495102301239967,\n\t\t\t\t0.9932093024253845,\n\t\t\t\t0.014003305695950985\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.4645261764526367,\n\t\t\t\t0.00723264180123806,\n\t\t\t\t-0.2942407727241516\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":34,\n\t\t\t\"name\":\"GeoSphere003.033\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.8586738705635071,\n\t\t\t\t0.020679840818047523,\n\t\t\t\t0.511664628982544,\n\t\t\t\t0.02123766951262951\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.1340523511171341,\n\t\t\t\t0.00732304947450757,\n\t\t\t\t0.06637275964021683\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":35,\n\t\t\t\"name\":\"GeoSphere003.034\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3220692574977875,\n\t\t\t\t0.8323050737380981,\n\t\t\t\t-0.03735608980059624,\n\t\t\t\t0.44960448145866394\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.14520026743412018,\n\t\t\t\t0.011362835764884949,\n\t\t\t\t0.13079720735549927\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":36,\n\t\t\t\"name\":\"GeoSphere003.035\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.2184649407863617,\n\t\t\t\t0.8654321432113647,\n\t\t\t\t0.23974138498306274,\n\t\t\t\t0.3818695545196533\n\t\t\t],\n\t\t\t
\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.12440294772386551,\n\t\t\t\t0.011363371275365353,\n\t\t\t\t0.10869459807872772\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":37,\n\t\t\t\"name\":\"GeoSphere003.036\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.14333312213420868,\n\t\t\t\t-0.7966905832290649,\n\t\t\t\t0.28383228182792664,\n\t\t\t\t0.513983428478241\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.028535349294543266,\n\t\t\t\t0.011363580822944641,\n\t\t\t\t-0.5388242602348328\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":38,\n\t\t\t\"name\":\"GeoSphere003.037\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5051876902580261,\n\t\t\t\t-0.03213047236204147,\n\t\t\t\t-0.8624112010002136,\n\t\t\t\t0.00020426412811502814\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.008328701369464397,\n\t\t\t\t0.007349267136305571,\n\t\t\t\t-0.4517853856086731\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":39,\n\t\t\t\"name\":\"GeoSphere003.038\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.18252430856227875,\n\t\t\t\t-0.9161030054092407,\n\t\t\t\t0.2706860303878784,\n\t\t\t\t0.2327428162097931\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.02293732576072216,\n\t\t\t\t0.011366519145667553,\n\t\t\t\t-0.5010008811950684\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":40,\n\t\t\t\"name\":\"GeoSphere003.039\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.3251582086086273,\n\t\t\t\t0.26995760202407837,\n\t\t\t\t0.009040526114404202,\n\t\t\t\t0.9062634110450745\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation
\":[\n\t\t\t\t-0.029143130406737328,\n\t\t\t\t0.011376534588634968,\n\t\t\t\t-0.47301021218299866\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":41,\n\t\t\t\"name\":\"GeoSphere003.040\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.3161882162094116,\n\t\t\t\t-0.7897778749465942,\n\t\t\t\t0.024548182263970375,\n\t\t\t\t0.5250460505485535\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.14146271347999573,\n\t\t\t\t0.011362064629793167,\n\t\t\t\t-0.3374761641025543\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":42,\n\t\t\t\"name\":\"GeoSphere003.041\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.8519217371940613,\n\t\t\t\t-0.0034365083556622267,\n\t\t\t\t-0.5236560702323914,\n\t\t\t\t0.0013694290537387133\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.16632358729839325,\n\t\t\t\t0.006841893773525953,\n\t\t\t\t-0.30004122853279114\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":43,\n\t\t\t\"name\":\"GeoSphere003.042\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.09581859409809113,\n\t\t\t\t-0.025791911408305168,\n\t\t\t\t-0.9950175881385803,\n\t\t\t\t0.009676595218479633\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006503880023956,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.10315526276826859,\n\t\t\t\t0.007282526697963476,\n\t\t\t\t-0.18153215944766998\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":44,\n\t\t\t\"name\":\"GeoSphere003.043\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.6865485906600952,\n\t\t\t\t0.015963802114129066,\n\t\t\t\t-0.7263807058334351,\n\t\t\t\t0.02770187519490719\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.01211047824472189,\n\t\t\t\t0.007334437221288681,\n\t\t\t\t-0.09995658695697784\n\t\t\t]\n\t\t},\n\t\t{\n
\t\t\t\"mesh\":45,\n\t\t\t\"name\":\"GeoSphere003.044\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.07489863783121109,\n\t\t\t\t-0.8347724676132202,\n\t\t\t\t0.3080703318119049,\n\t\t\t\t0.45015305280685425\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.48611730337142944,\n\t\t\t\t0.011453031562268734,\n\t\t\t\t-0.054811492562294006\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":46,\n\t\t\t\"name\":\"GeoSphere003.045\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.2053934782743454,\n\t\t\t\t0.08531651645898819,\n\t\t\t\t0.243620827794075,\n\t\t\t\t0.9440251588821411\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.24661734700202942,\n\t\t\t\t0.011371743865311146,\n\t\t\t\t-0.3051324784755707\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":47,\n\t\t\t\"name\":\"GeoSphere003.046\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.9538770318031311,\n\t\t\t\t0.02990647964179516,\n\t\t\t\t-0.29867857694625854,\n\t\t\t\t0.0039438814856112\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.1312432438135147,\n\t\t\t\t0.007307452615350485,\n\t\t\t\t-0.36265984177589417\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":48,\n\t\t\t\"name\":\"GeoSphere003.047\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.22314214706420898,\n\t\t\t\t-0.719915509223938,\n\t\t\t\t-0.22631610929965973,\n\t\t\t\t0.6170172691345215\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.09208523482084274,\n\t\t\t\t0.011365761049091816,\n\t\t\t\t-0.2695140540599823\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":49,\n\t\t\t\"name\":\"GeoSphere003.048\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.3145400285720825,\n\t\t\t\t-0.76616871356964
11,\n\t\t\t\t0.042526405304670334,\n\t\t\t\t0.5587858557701111\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.034097280353307724,\n\t\t\t\t0.011358587071299553,\n\t\t\t\t-0.45633184909820557\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":50,\n\t\t\t\"name\":\"GeoSphere003.049\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.8170110583305359,\n\t\t\t\t-0.03178128972649574,\n\t\t\t\t0.5755193829536438,\n\t\t\t\t0.01613527163863182\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006502389907837,\n\t\t\t\t0.13006500899791718\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.11040344834327698,\n\t\t\t\t0.007420437876135111,\n\t\t\t\t-0.3653630018234253\n\t\t\t]\n\t\t}\n\t],\n\t\"cameras\":[\n\t\t{\n\t\t\t\"name\":\"Camera.001\",\n\t\t\t\"perspective\":{\n\t\t\t\t\"aspectRatio\":1.7777777777777777,\n\t\t\t\t\"yfov\":0.39959652046304894,\n\t\t\t\t\"zfar\":1000,\n\t\t\t\t\"znear\":0.10000000149011612\n\t\t\t},\n\t\t\t\"type\":\"perspective\"\n\t\t}\n\t],\n\t\"materials\":[\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.003\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"name\":\"Material.002\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.10000000149011612,\n\t\t\t\t\t0.10000000149011612,\n\t\t\t\t\t0.10000000149011612,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.005\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\
t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.007\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.008\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.009\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.010\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.011\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.012\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"tra
nsmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.013\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.014\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.015\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.016\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.017\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.018\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.019\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFact
or\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.020\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.021\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.022\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.023\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.024\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.025\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\
t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.026\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.027\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.028\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.029\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.030\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.031\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.032\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t}
,\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.033\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.034\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.035\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.036\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.037\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.038\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t
\t\t\"name\":\"Material.039\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.040\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.041\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.042\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.043\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.044\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.045\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doub
leSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.046\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.047\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.048\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.049\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.050\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.051\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Materia
l.052\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.053\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.054\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t}\n\t],\n\t\"meshes\":[\n\t\t{\n\t\t\t\"name\":\"Mesh.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":0,\n\t\t\t\t\t\t\"NORMAL\":1,\n\t\t\t\t\t\t\"TEXCOORD_0\":2\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":4,\n\t\t\t\t\t\t\"NORMAL\":5,\n\t\t\t\t\t\t\"TEXCOORD_0\":6\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":7,\n\t\t\t\t\t\"material\":1\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":8,\n\t\t\t\t\t\t\"NORMAL\":9,\n\t\t\t\t\t\t\"TEXCOORD_0\":10\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":2\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.005\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":11,\n\t\t\t\t\t\t\"NORMAL\":12,\n\t\t\t\t\t\t\"TEXCOORD_0\":13\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":3\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.006\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\
":14,\n\t\t\t\t\t\t\"NORMAL\":15,\n\t\t\t\t\t\t\"TEXCOORD_0\":16\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":4\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.007\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":17,\n\t\t\t\t\t\t\"NORMAL\":18,\n\t\t\t\t\t\t\"TEXCOORD_0\":19\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":5\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.008\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":20,\n\t\t\t\t\t\t\"NORMAL\":21,\n\t\t\t\t\t\t\"TEXCOORD_0\":22\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":6\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.009\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":23,\n\t\t\t\t\t\t\"NORMAL\":24,\n\t\t\t\t\t\t\"TEXCOORD_0\":25\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":7\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.010\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":26,\n\t\t\t\t\t\t\"NORMAL\":27,\n\t\t\t\t\t\t\"TEXCOORD_0\":28\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":8\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.011\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":29,\n\t\t\t\t\t\t\"NORMAL\":30,\n\t\t\t\t\t\t\"TEXCOORD_0\":31\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":9\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.012\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":32,\n\t\t\t\t\t\t\"NORMAL\":33,\n\t\t\t\t\t\t\"TEXCOORD_0\":34\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.013\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":35,\n\t\t\t\t\t\
t\"NORMAL\":36,\n\t\t\t\t\t\t\"TEXCOORD_0\":37\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.014\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":38,\n\t\t\t\t\t\t\"NORMAL\":39,\n\t\t\t\t\t\t\"TEXCOORD_0\":40\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":12\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.015\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":41,\n\t\t\t\t\t\t\"NORMAL\":42,\n\t\t\t\t\t\t\"TEXCOORD_0\":43\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":13\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.016\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":44,\n\t\t\t\t\t\t\"NORMAL\":45,\n\t\t\t\t\t\t\"TEXCOORD_0\":46\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":14\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.017\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":47,\n\t\t\t\t\t\t\"NORMAL\":48,\n\t\t\t\t\t\t\"TEXCOORD_0\":49\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":15\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.018\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":50,\n\t\t\t\t\t\t\"NORMAL\":51,\n\t\t\t\t\t\t\"TEXCOORD_0\":52\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":16\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.019\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":53,\n\t\t\t\t\t\t\"NORMAL\":54,\n\t\t\t\t\t\t\"TEXCOORD_0\":55\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":17\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.020\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":56,\n\t\t\t\t\t\t\"NORMAL\":
57,\n\t\t\t\t\t\t\"TEXCOORD_0\":58\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":18\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.021\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":59,\n\t\t\t\t\t\t\"NORMAL\":60,\n\t\t\t\t\t\t\"TEXCOORD_0\":61\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":19\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.022\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":62,\n\t\t\t\t\t\t\"NORMAL\":63,\n\t\t\t\t\t\t\"TEXCOORD_0\":64\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.023\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":65,\n\t\t\t\t\t\t\"NORMAL\":66,\n\t\t\t\t\t\t\"TEXCOORD_0\":67\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.024\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":68,\n\t\t\t\t\t\t\"NORMAL\":69,\n\t\t\t\t\t\t\"TEXCOORD_0\":70\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":22\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.025\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":71,\n\t\t\t\t\t\t\"NORMAL\":72,\n\t\t\t\t\t\t\"TEXCOORD_0\":73\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":23\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.026\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":74,\n\t\t\t\t\t\t\"NORMAL\":75,\n\t\t\t\t\t\t\"TEXCOORD_0\":76\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":24\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.027\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":77,\n\t\t\t\t\t\t\"NORMAL\":78,\n\t\t\t\
t\t\t\"TEXCOORD_0\":79\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":25\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.028\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":80,\n\t\t\t\t\t\t\"NORMAL\":81,\n\t\t\t\t\t\t\"TEXCOORD_0\":82\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.029\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":83,\n\t\t\t\t\t\t\"NORMAL\":84,\n\t\t\t\t\t\t\"TEXCOORD_0\":85\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":27\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.030\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":86,\n\t\t\t\t\t\t\"NORMAL\":87,\n\t\t\t\t\t\t\"TEXCOORD_0\":88\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":28\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.031\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":89,\n\t\t\t\t\t\t\"NORMAL\":90,\n\t\t\t\t\t\t\"TEXCOORD_0\":91\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":29\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.032\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":92,\n\t\t\t\t\t\t\"NORMAL\":93,\n\t\t\t\t\t\t\"TEXCOORD_0\":94\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":30\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.033\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":95,\n\t\t\t\t\t\t\"NORMAL\":96,\n\t\t\t\t\t\t\"TEXCOORD_0\":97\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":31\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.034\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":98,\n\t\t\t\t\t\t\"NORMAL\":99,\n\t\t\t\t\t\t\"TEXCO
ORD_0\":100\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":32\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.035\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":101,\n\t\t\t\t\t\t\"NORMAL\":102,\n\t\t\t\t\t\t\"TEXCOORD_0\":103\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":33\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.036\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":104,\n\t\t\t\t\t\t\"NORMAL\":105,\n\t\t\t\t\t\t\"TEXCOORD_0\":106\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":34\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.037\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":107,\n\t\t\t\t\t\t\"NORMAL\":108,\n\t\t\t\t\t\t\"TEXCOORD_0\":109\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":35\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.038\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":110,\n\t\t\t\t\t\t\"NORMAL\":111,\n\t\t\t\t\t\t\"TEXCOORD_0\":112\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.039\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":113,\n\t\t\t\t\t\t\"NORMAL\":114,\n\t\t\t\t\t\t\"TEXCOORD_0\":115\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":37\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.040\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":116,\n\t\t\t\t\t\t\"NORMAL\":117,\n\t\t\t\t\t\t\"TEXCOORD_0\":118\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.041\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":119,\n\t\t\t\t\t\t\"NORMAL\":120,\n\t\t\t\t\t
\t\"TEXCOORD_0\":121\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":39\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.042\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":122,\n\t\t\t\t\t\t\"NORMAL\":123,\n\t\t\t\t\t\t\"TEXCOORD_0\":124\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":40\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.043\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":125,\n\t\t\t\t\t\t\"NORMAL\":126,\n\t\t\t\t\t\t\"TEXCOORD_0\":127\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":41\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.044\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":128,\n\t\t\t\t\t\t\"NORMAL\":129,\n\t\t\t\t\t\t\"TEXCOORD_0\":130\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":42\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.045\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":131,\n\t\t\t\t\t\t\"NORMAL\":132,\n\t\t\t\t\t\t\"TEXCOORD_0\":133\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":43\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.046\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":134,\n\t\t\t\t\t\t\"NORMAL\":135,\n\t\t\t\t\t\t\"TEXCOORD_0\":136\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.047\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":137,\n\t\t\t\t\t\t\"NORMAL\":138,\n\t\t\t\t\t\t\"TEXCOORD_0\":139\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":45\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.048\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":140,\n\t\t\t\t\t\t\"NORMAL\":141,\n\
t\t\t\t\t\t\"TEXCOORD_0\":142\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":46\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.049\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":143,\n\t\t\t\t\t\t\"NORMAL\":144,\n\t\t\t\t\t\t\"TEXCOORD_0\":145\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":47\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.050\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":146,\n\t\t\t\t\t\t\"NORMAL\":147,\n\t\t\t\t\t\t\"TEXCOORD_0\":148\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":48\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.051\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":149,\n\t\t\t\t\t\t\"NORMAL\":150,\n\t\t\t\t\t\t\"TEXCOORD_0\":151\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":49\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.052\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":152,\n\t\t\t\t\t\t\"NORMAL\":153,\n\t\t\t\t\t\t\"TEXCOORD_0\":154\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":50\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t],\n\t\"accessors\":[\n\t\t{\n\t\t\t\"bufferView\":0,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":1,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":2,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":3,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"SCALAR\"\n\
t\t},\n\t\t{\n\t\t\t\"bufferView\":4,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":5,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":6,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":7,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":8,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":9,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":10,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":11,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":12,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":13,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":14,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n
\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":15,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":16,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":17,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":18,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":19,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":20,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":21,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":22,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":23,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":24,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\
t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":25,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":26,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":27,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":28,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":29,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":30,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":31,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":32,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":33,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":34,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":35,\n\t\t\t\"componentType\":5126,\n\t\t\t\"cou
nt\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":36,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":37,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":38,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":39,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":40,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":41,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":42,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":43,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":44,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.1380295
0084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":45,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":46,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":47,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":48,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":49,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":50,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":51,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":52,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":53,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":54,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":55,\n\t\t\t\"component
Type\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":56,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":57,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":58,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":59,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":60,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":61,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":62,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":63,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":64,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":65,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.052112691104
41208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":66,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":67,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":68,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":69,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":70,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":71,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":72,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":73,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":74,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView
\":75,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":76,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":77,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":78,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":79,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":80,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":81,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":82,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":83,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":84,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":85,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t
\t\"bufferView\":86,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":87,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":88,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":89,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":90,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":91,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":92,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":93,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":94,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":95,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643
281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":96,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":97,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":98,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":99,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":100,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":101,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":102,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":103,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":104,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":105,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\
":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":106,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":107,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":108,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":109,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":110,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":111,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":112,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":113,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":114,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":115,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":116,\n\t\t\t\"componentType\":5126,\n\t\t\t\"co
unt\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":117,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":118,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":119,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":120,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":121,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":122,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":123,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":124,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":125,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t
-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":126,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":127,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":128,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":129,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":130,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":131,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":132,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":133,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":134,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":135,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":13
6,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":137,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":138,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":139,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":140,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":141,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":142,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":143,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":144,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":145,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":146,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279
354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":147,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":148,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":149,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":150,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":151,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":152,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"max\":[\n\t\t\t\t0.13739751279354095,\n\t\t\t\t0.05211269110441208,\n\t\t\t\t0.13802951574325562\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.13643281161785126,\n\t\t\t\t-0.10680017620325089,\n\t\t\t\t-0.13802950084209442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":153,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":154,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"VEC2\"\n\t\t}\n\t],\n\t\"bufferViews\":[\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":0,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":6480,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":12960,\n
\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1080,\n\t\t\t\"byteOffset\":17280,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":18360,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":18408,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":18456,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byteOffset\":18488,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":18500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":24980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":31460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":35780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":42260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":48740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":53060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":59540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":66020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":70340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":76820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":83300,\n\t\t\t\"target\":34
962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":87620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":94100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":100580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":104900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":111380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":117860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":122180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":128660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":135140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":139460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":145940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":152420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":156740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":163220,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":169700,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":174020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":180500,\n\t\t\t\"target\
":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":186980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":191300,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":197780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":204260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":208580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":215060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":221540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":225860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":232340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":238820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":243140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":249620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":256100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":260420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":266900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":273380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":277700,\n\t\t\t\"t
arget\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":284180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":290660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":294980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":301460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":307940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":312260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":318740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":325220,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":329540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":336020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":342500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":346820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":353300,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":359780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":364100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":370580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":377060,\n\t\
t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":381380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":387860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":394340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":398660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":405140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":411620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":415940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":422420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":428900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":433220,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":439700,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":446180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":450500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":456980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":463460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":467780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":474260
,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":480740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":485060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":491540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":498020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":502340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":508820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":515300,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":519620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":526100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":532580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":536900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":543380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":549860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":554180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":560660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":567140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":
571460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":577940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":584420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":588740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":595220,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":601700,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":606020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":612500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":618980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":623300,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":629780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":636260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":640580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":647060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":653540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":657860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":664340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOff
set\":670820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":675140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":681620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":688100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":692420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":698900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":705380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":709700,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":716180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":722660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":726980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":733460,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":739940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":744260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":750740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":757220,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":761540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"b
yteOffset\":768020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":774500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":778820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":785300,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":791780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":796100,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":802580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":809060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":813380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":819860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":826340,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":830660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":837140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":843620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":847940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6480,\n\t\t\t\"byteOffset\":854420,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":860900,\n\t\t\t\"target\":34962\n\t\t}\n\t],\n\t\"buffers\":[\n\t\t{\n\t\t\t\"byteLength\":865220,\n
\t\t\t\"uri\":\"multi-dispersion.bin\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "data/GLTFs/nested-dielectrics-complex.gltf",
    "content": "{\n\t\"asset\":{\n\t\t\"generator\":\"Khronos glTF Blender I/O v4.1.62\",\n\t\t\"version\":\"2.0\"\n\t},\n\t\"extensionsUsed\":[\n\t\t\"KHR_materials_transmission\",\n\t\t\"KHR_materials_ior\",\n\t\t\"KHR_materials_specular\",\n\t\t\"KHR_materials_emissive_strength\"\n\t],\n\t\"scene\":0,\n\t\"scenes\":[\n\t\t{\n\t\t\t\"name\":\"Scene\",\n\t\t\t\"nodes\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t3,\n\t\t\t\t4,\n\t\t\t\t5,\n\t\t\t\t6\n\t\t\t]\n\t\t}\n\t],\n\t\"nodes\":[\n\t\t{\n\t\t\t\"mesh\":0,\n\t\t\t\"name\":\"Sphere\"\n\t\t},\n\t\t{\n\t\t\t\"mesh\":1,\n\t\t\t\"name\":\"Sphere.001\",\n\t\t\t\"translation\":[\n\t\t\t\t0.014221577905118465,\n\t\t\t\t0,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":2,\n\t\t\t\"name\":\"Plane\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-0.3826834559440613,\n\t\t\t\t0.9238795638084412\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.1114394664764404,\n\t\t\t\t1.827852487564087,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"camera\":0,\n\t\t\t\"name\":\"Camera\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.3826834261417389,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9238795638084412\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0,\n\t\t\t\t5.029460906982422,\n\t\t\t\t5.029465198516846\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":3,\n\t\t\t\"name\":\"Plane.001\",\n\t\t\t\"scale\":[\n\t\t\t\t510.8236389160156,\n\t\t\t\t1,\n\t\t\t\t510.8236389160156\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7477800250053406,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":4,\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"scale\":[\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9598821401596069,\n\t\t\t\t0.033187270164489746,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":5,\n\t\t\t\"name\":\"Torus.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.9547613859176636,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.29737329483032227\n\t\t\t]\n\t\t}\n\t],\n\t\"cameras\":[\n
\t\t{\n\t\t\t\"name\":\"Camera.002\",\n\t\t\t\"perspective\":{\n\t\t\t\t\"aspectRatio\":1.7777777777777777,\n\t\t\t\t\"yfov\":0.39959651231765747,\n\t\t\t\t\"zfar\":1000,\n\t\t\t\t\"znear\":0.10000000149011612\n\t\t\t},\n\t\t\t\"type\":\"perspective\"\n\t\t}\n\t],\n\t\"materials\":[\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1.399999976158142\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.004\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_specular\":{\n\t\t\t\t\t\"specularColorFactor\":[\n\t\t\t\t\t\t2.0,\n\t\t\t\t\t\t2.0,\n\t\t\t\t\t\t2.0\n\t\t\t\t\t]\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.005\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0.04319041967391968,\n\t\t\t\t\t0.8040210008621216,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":25\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.006\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n
\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t1,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0.38120442628860474,\n\t\t\t\t\t0.8007099628448486,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t}\n\t],\n\t\"meshes\":[\n\t\t{\n\t\t\t\"name\":\"Sphere.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":0,\n\t\t\t\t\t\t\"NORMAL\":1,\n\t\t\t\t\t\t\"TEXCOORD_0\":2\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":4,\n\t\t\t\t\t\t\"NORMAL\":5,\n\t\t\t\t\t\t\"TEXCOORD_0\":6\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":7,\n\t\t\t\t\t\"material\":1\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":8,\n\t\t\t\t\t\t\"NORMAL\":9,\n\t\t\t\t\t\t\"TEXCOORD_0\":10\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":11,\n\t\t\t\t\t\"material\":2\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":12,\n\t\t\t\t\t\t\"NORMAL\":13,\n\t\t\t\t\t\t\"TEXCOORD_0\":14\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attribute
s\":{\n\t\t\t\t\t\t\"POSITION\":15,\n\t\t\t\t\t\t\"NORMAL\":16,\n\t\t\t\t\t\t\"TEXCOORD_0\":17\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":18,\n\t\t\t\t\t\"material\":3\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Torus.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":19,\n\t\t\t\t\t\t\"NORMAL\":20,\n\t\t\t\t\t\t\"TEXCOORD_0\":21\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":22,\n\t\t\t\t\t\"material\":4\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t],\n\t\"accessors\":[\n\t\t{\n\t\t\t\"bufferView\":0,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"max\":[\n\t\t\t\t0.9999997019767761,\n\t\t\t\t1,\n\t\t\t\t0.9999993443489075\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999990463256836,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":1,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":2,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":3,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2880,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":4,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"max\":[\n\t\t\t\t0.34999987483024597,\n\t\t\t\t0.3499999940395355,\n\t\t\t\t0.3499997556209564\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.34999966621398926,\n\t\t\t\t-0.3499999940395355,\n\t\t\t\t-0.3499999940395355\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":5,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":6,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":7,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2880,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":8,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1\n\t\t\t
],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":9,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":10,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":11,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":12,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":13,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":14,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":15,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":16,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":17,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":18,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":19,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"max\":[\n\t\t\t\t0.6000000238418579,\n\t\t\t\t0.10000000149011612,\n\t\t\t\t0.6000000238418579\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6000000238418579,\n\t\t\t\t-0.10000000149011612,\n\t\t\t\t-0.6000000238418579\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":20,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n
\t\t{\n\t\t\t\"bufferView\":21,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":22,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3456,\n\t\t\t\"type\":\"SCALAR\"\n\t\t}\n\t],\n\t\"bufferViews\":[\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":0,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":6708,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4472,\n\t\t\t\"byteOffset\":13416,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5760,\n\t\t\t\"byteOffset\":17888,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":23648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":30356,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4472,\n\t\t\t\"byteOffset\":37064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5760,\n\t\t\t\"byteOffset\":41536,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47296,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47344,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":47392,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byteOffset\":47424,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47436,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47484,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":47532,\n\t\t\t\"target\":34962\n\t\t}
,\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":47564,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":47852,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":48140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":48332,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":48404,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":56048,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5096,\n\t\t\t\"byteOffset\":63692,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6912,\n\t\t\t\"byteOffset\":68788,\n\t\t\t\"target\":34963\n\t\t}\n\t],\n\t\"buffers\":[\n\t\t{\n\t\t\t\"byteLength\":75700,\n\t\t\t\"uri\":\"nested-dielectrics-complex.bin\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "data/GLTFs/nested-dielectrics.gltf",
    "content": "{\n\t\"asset\":{\n\t\t\"generator\":\"Khronos glTF Blender I/O v4.4.55\",\n\t\t\"version\":\"2.0\"\n\t},\n\t\"extensionsUsed\":[\n\t\t\"KHR_materials_clearcoat\",\n\t\t\"KHR_materials_transmission\",\n\t\t\"KHR_materials_emissive_strength\",\n\t\t\"KHR_materials_specular\",\n\t\t\"KHR_materials_ior\"\n\t],\n\t\"scene\":0,\n\t\"scenes\":[\n\t\t{\n\t\t\t\"name\":\"Scene\",\n\t\t\t\"nodes\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t3,\n\t\t\t\t4,\n\t\t\t\t5,\n\t\t\t\t6\n\t\t\t]\n\t\t}\n\t],\n\t\"nodes\":[\n\t\t{\n\t\t\t\"mesh\":0,\n\t\t\t\"name\":\"Sphere\"\n\t\t},\n\t\t{\n\t\t\t\"mesh\":1,\n\t\t\t\"name\":\"Sphere.001\",\n\t\t\t\"translation\":[\n\t\t\t\t0.014221577905118465,\n\t\t\t\t0,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":2,\n\t\t\t\"name\":\"Plane\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-0.3826834559440613,\n\t\t\t\t0.9238795638084412\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.1114394664764404,\n\t\t\t\t1.827852487564087,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"camera\":0,\n\t\t\t\"name\":\"Camera\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.3826834261417389,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9238795638084412\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0,\n\t\t\t\t5.029460906982422,\n\t\t\t\t5.029465198516846\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":3,\n\t\t\t\"name\":\"Plane.001\",\n\t\t\t\"scale\":[\n\t\t\t\t6.2513837814331055,\n\t\t\t\t0.01223785150796175,\n\t\t\t\t6.2513837814331055\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7477800250053406,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":4,\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"scale\":[\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224,\n\t\t\t\t0.20000000298023224\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9598821401596069,\n\t\t\t\t0.033187270164489746,\n\t\t\t\t0\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":5,\n\t\t\t\"name\":\"Torus.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.9547613859176636,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.2973
7329483032227\n\t\t\t]\n\t\t}\n\t],\n\t\"cameras\":[\n\t\t{\n\t\t\t\"name\":\"Camera.002\",\n\t\t\t\"perspective\":{\n\t\t\t\t\"aspectRatio\":1.7777777777777777,\n\t\t\t\t\"yfov\":0.39959651231765747,\n\t\t\t\t\"zfar\":1000,\n\t\t\t\t\"znear\":0.10000000149011612\n\t\t\t},\n\t\t\t\"type\":\"perspective\"\n\t\t}\n\t],\n\t\"materials\":[\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1.399999976158142\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.004\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_specular\":{\n\t\t\t\t\t\"specularColorFactor\":[\n\t\t\t\t\t\t2.0,\n\t\t\t\t\t\t2.0,\n\t\t\t\t\t\t2.0\n\t\t\t\t\t]\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.005\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0.04319041967391968,\n\t\t\t\t\t0.8040210008621216,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":25\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.006\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\
t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t1,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0.38120442628860474,\n\t\t\t\t\t0.8007099628448486,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t}\n\t],\n\t\"meshes\":[\n\t\t{\n\t\t\t\"name\":\"Sphere.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":0,\n\t\t\t\t\t\t\"NORMAL\":1,\n\t\t\t\t\t\t\"TEXCOORD_0\":2\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":3,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":4,\n\t\t\t\t\t\t\"NORMAL\":5,\n\t\t\t\t\t\t\"TEXCOORD_0\":6\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":7,\n\t\t\t\t\t\"material\":1\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":8,\n\t\t\t\t\t\t\"NORMAL\":9,\n\t\t\t\t\t\t\"TEXCOORD_0\":10\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":11,\n\t\t\t\t\t\"material\":2\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\
"name\":\"Plane.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":12,\n\t\t\t\t\t\t\"NORMAL\":13,\n\t\t\t\t\t\t\"TEXCOORD_0\":14\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":15,\n\t\t\t\t\t\t\"NORMAL\":16,\n\t\t\t\t\t\t\"TEXCOORD_0\":17\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":18,\n\t\t\t\t\t\"material\":3\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Torus.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":19,\n\t\t\t\t\t\t\"NORMAL\":20,\n\t\t\t\t\t\t\"TEXCOORD_0\":21\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":22,\n\t\t\t\t\t\"material\":4\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t],\n\t\"accessors\":[\n\t\t{\n\t\t\t\"bufferView\":0,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"max\":[\n\t\t\t\t0.9999997019767761,\n\t\t\t\t1,\n\t\t\t\t0.9999993443489075\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999990463256836,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":1,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":2,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":3,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2880,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":4,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"max\":[\n\t\t\t\t0.34999987483024597,\n\t\t\t\t0.3499999940395355,\n\t\t\t\t0.3499997556209564\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.34999966621398926,\n\t\t\t\t-0.3499999940395355,\n\t\t\t\t-0.3499999940395355\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":5,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":6,\n\t\t\t\"compone
ntType\":5126,\n\t\t\t\"count\":559,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":7,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2880,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":8,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":9,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":10,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":11,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":12,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":13,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":14,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":15,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":16,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":17,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":18,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":19,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"max\":[\n\t\t\t\t0.6000000238418579,\n\t\t
\t\t0.10000000149011612,\n\t\t\t\t0.6000000238418579\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6000000238418579,\n\t\t\t\t-0.10000000149011612,\n\t\t\t\t-0.6000000238418579\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":20,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":21,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":22,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3456,\n\t\t\t\"type\":\"SCALAR\"\n\t\t}\n\t],\n\t\"bufferViews\":[\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":0,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":6708,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4472,\n\t\t\t\"byteOffset\":13416,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5760,\n\t\t\t\"byteOffset\":17888,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":23648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6708,\n\t\t\t\"byteOffset\":30356,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4472,\n\t\t\t\"byteOffset\":37064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5760,\n\t\t\t\"byteOffset\":41536,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47296,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47344,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":47392,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byteOffset\":47424,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t
\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47436,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":47484,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":47532,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":47564,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":47852,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":48140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":48332,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":48404,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":56048,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5096,\n\t\t\t\"byteOffset\":63692,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6912,\n\t\t\t\"byteOffset\":68788,\n\t\t\t\"target\":34963\n\t\t}\n\t],\n\t\"buffers\":[\n\t\t{\n\t\t\t\"byteLength\":75700,\n\t\t\t\"uri\":\"nested-dielectrics.bin\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "data/GLTFs/the-white-room-low.gltf",
    "content": "{\n\t\"asset\":{\n\t\t\"generator\":\"Khronos glTF Blender I/O v4.2.70\",\n\t\t\"version\":\"2.0\"\n\t},\n\t\"extensionsUsed\":[\n\t\t\"KHR_materials_clearcoat\",\n\t\t\"KHR_materials_transmission\",\n\t\t\"KHR_materials_emissive_strength\",\n\t\t\"KHR_materials_ior\"\n\t],\n\t\"scene\":0,\n\t\"scenes\":[\n\t\t{\n\t\t\t\"name\":\"Scene\",\n\t\t\t\"nodes\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2,\n\t\t\t\t3,\n\t\t\t\t4,\n\t\t\t\t5,\n\t\t\t\t6,\n\t\t\t\t7,\n\t\t\t\t8,\n\t\t\t\t9,\n\t\t\t\t10,\n\t\t\t\t11,\n\t\t\t\t12,\n\t\t\t\t13,\n\t\t\t\t14,\n\t\t\t\t15,\n\t\t\t\t16,\n\t\t\t\t17,\n\t\t\t\t18,\n\t\t\t\t19,\n\t\t\t\t20,\n\t\t\t\t21,\n\t\t\t\t22,\n\t\t\t\t23,\n\t\t\t\t24,\n\t\t\t\t25,\n\t\t\t\t26,\n\t\t\t\t27,\n\t\t\t\t28,\n\t\t\t\t29,\n\t\t\t\t30,\n\t\t\t\t31,\n\t\t\t\t32,\n\t\t\t\t33,\n\t\t\t\t34,\n\t\t\t\t35,\n\t\t\t\t36,\n\t\t\t\t37,\n\t\t\t\t38,\n\t\t\t\t39,\n\t\t\t\t40,\n\t\t\t\t41,\n\t\t\t\t42,\n\t\t\t\t43,\n\t\t\t\t44,\n\t\t\t\t45,\n\t\t\t\t46,\n\t\t\t\t47,\n\t\t\t\t48,\n\t\t\t\t49,\n\t\t\t\t50,\n\t\t\t\t51,\n\t\t\t\t52,\n\t\t\t\t53,\n\t\t\t\t54,\n\t\t\t\t55,\n\t\t\t\t56,\n\t\t\t\t57,\n\t\t\t\t58,\n\t\t\t\t59,\n\t\t\t\t60,\n\t\t\t\t61,\n\t\t\t\t62,\n\t\t\t\t63,\n\t\t\t\t64,\n\t\t\t\t65,\n\t\t\t\t66,\n\t\t\t\t67,\n\t\t\t\t68,\n\t\t\t\t69,\n\t\t\t\t70,\n\t\t\t\t71,\n\t\t\t\t72,\n\t\t\t\t73,\n\t\t\t\t74,\n\t\t\t\t75,\n\t\t\t\t76,\n\t\t\t\t77,\n\t\t\t\t78,\n\t\t\t\t79,\n\t\t\t\t80,\n\t\t\t\t81,\n\t\t\t\t82,\n\t\t\t\t83,\n\t\t\t\t84,\n\t\t\t\t85,\n\t\t\t\t86,\n\t\t\t\t87,\n\t\t\t\t88,\n\t\t\t\t89,\n\t\t\t\t90,\n\t\t\t\t91,\n\t\t\t\t92,\n\t\t\t\t93,\n\t\t\t\t94,\n\t\t\t\t95,\n\t\t\t\t96,\n\t\t\t\t97,\n\t\t\t\t98,\n\t\t\t\t99,\n\t\t\t\t100,\n\t\t\t\t101,\n\t\t\t\t102,\n\t\t\t\t103,\n\t\t\t\t104,\n\t\t\t\t105,\n\t\t\t\t106,\n\t\t\t\t107,\n\t\t\t\t108,\n\t\t\t\t109,\n\t\t\t\t110,\n\t\t\t\t111,\n\t\t\t\t112,\n\t\t\t\t113,\n\t\t\t\t114,\n\t\t\t\t115,\n\t\t\t\t116,\n\t\t\t\t117,\n\t\t\t\t118,\n\t\t\t\t119,\n\t\t\t\t120,\n\t\t\t\t121,\n\t\t\t\t122,\n\t\t\t\t123
,\n\t\t\t\t124,\n\t\t\t\t125,\n\t\t\t\t126,\n\t\t\t\t127,\n\t\t\t\t128,\n\t\t\t\t129,\n\t\t\t\t130,\n\t\t\t\t131,\n\t\t\t\t132,\n\t\t\t\t133,\n\t\t\t\t134,\n\t\t\t\t135,\n\t\t\t\t136,\n\t\t\t\t137,\n\t\t\t\t138,\n\t\t\t\t139,\n\t\t\t\t140,\n\t\t\t\t154,\n\t\t\t\t155,\n\t\t\t\t156,\n\t\t\t\t157,\n\t\t\t\t158,\n\t\t\t\t159,\n\t\t\t\t160,\n\t\t\t\t161,\n\t\t\t\t162,\n\t\t\t\t163,\n\t\t\t\t164,\n\t\t\t\t165,\n\t\t\t\t166,\n\t\t\t\t167,\n\t\t\t\t168,\n\t\t\t\t169,\n\t\t\t\t170,\n\t\t\t\t171,\n\t\t\t\t172,\n\t\t\t\t173,\n\t\t\t\t174,\n\t\t\t\t175,\n\t\t\t\t176,\n\t\t\t\t177,\n\t\t\t\t178,\n\t\t\t\t179,\n\t\t\t\t180,\n\t\t\t\t181,\n\t\t\t\t182,\n\t\t\t\t183,\n\t\t\t\t184,\n\t\t\t\t185,\n\t\t\t\t186,\n\t\t\t\t187,\n\t\t\t\t188,\n\t\t\t\t189,\n\t\t\t\t190\n\t\t\t]\n\t\t}\n\t],\n\t\"nodes\":[\n\t\t{\n\t\t\t\"mesh\":0,\n\t\t\t\"name\":\"Picture-rail\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.911670207977295,\n\t\t\t\t2.6229679584503174,\n\t\t\t\t3.221792697906494\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":1,\n\t\t\t\"name\":\"Ceiling\",\n\t\t\t\"translation\":[\n\t\t\t\t-0.48702001571655273,\n\t\t\t\t3.168609857559204,\n\t\t\t\t3.2564644813537598\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":2,\n\t\t\t\"name\":\"Plane.009\",\n\t\t\t\"translation\":[\n\t\t\t\t-0.48702001571655273,\n\t\t\t\t0.02875208854675293,\n\t\t\t\t3.2564644813537598\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":3,\n\t\t\t\"name\":\"Skirting-Board\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4447784423828125,\n\t\t\t\t0.03202188014984131,\n\t\t\t\t3.7508115768432617\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":4,\n\t\t\t\"name\":\"Cornis\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5000000596046448,\n\t\t\t\t-0.4999999403953552,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.3613674640655518,\n\t\t\
t\t3.0883710384368896,\n\t\t\t\t2.1537551879882812\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":5,\n\t\t\t\"name\":\"Walls\",\n\t\t\t\"translation\":[\n\t\t\t\t0.21100452542304993,\n\t\t\t\t0.02875208854675293,\n\t\t\t\t3.0448410511016846\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":6,\n\t\t\t\"name\":\"Window-panel-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.1740332841873169,\n\t\t\t\t0.6853556632995605,\n\t\t\t\t0.6853557229042053,\n\t\t\t\t0.1740332990884781\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4325926303863525,\n\t\t\t\t2.377373456954956,\n\t\t\t\t0.7042121887207031\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":7,\n\t\t\t\"name\":\"Window-panel-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.17408347129821777,\n\t\t\t\t-0.6853429079055786,\n\t\t\t\t-0.6853430271148682,\n\t\t\t\t0.1740833818912506\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.0055828094482422,\n\t\t\t\t2.377373456954956,\n\t\t\t\t0.7044205665588379\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":8,\n\t\t\t\"name\":\"Window-panel-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409,\n\t\t\t\t0.70710688829422,\n\t\t\t\t7.586152861449591e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.21441829204559326,\n\t\t\t\t2.377373456954956,\n\t\t\t\t0.42468491196632385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":9,\n\t\t\t\"name\":\"Right-Window-Mesh-Emiiter\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6857413053512573,\n\t\t\t\t-0.1725078821182251,\n\t\t\t\t0.17250794172286987,\n\t\t\t\t0.685741126537323\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.3643953502178192,\n\t\t\t\t1.0219993591308594,\n\t\t\t\t0.8908603191375732\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.469131588935852,\n\t\t\t\t1.9036474227905273,\n\t\t\t\t0.6438109278678894\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":9,\n\t\t\t\"name\":\"Left-Window-Mesh-Emiiter\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6861234307289124,\n\t\t\t\t0.17098161578178406,\n\t\t\t\t-0.17098166048526764,\n\t\t\t\t0.6861233711242676\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.3643953204154968,\n\t\
t\t\t1.0219993591308594,\n\t\t\t\t0.8908604383468628\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.038603663444519,\n\t\t\t\t1.9036474227905273,\n\t\t\t\t0.646122395992279\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":9,\n\t\t\t\"name\":\"Middle-Window-Mesh-Emiiter\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.4622892439365387,\n\t\t\t\t1,\n\t\t\t\t0.871683657169342\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.21810980141162872,\n\t\t\t\t1.9036474227905273,\n\t\t\t\t0.36019620299339294\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":10,\n\t\t\t\"name\":\"Rear-Window-Mesh-Emiiter\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7071066498756409,\n\t\t\t\t0.70710688829422,\n\t\t\t\t5.338507236274381e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t1.5630946159362793,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.1428062915802002,\n\t\t\t\t2.013288974761963,\n\t\t\t\t8.212237358093262\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":11,\n\t\t\t\"name\":\"Standing-Lamp-Base\",\n\t\t\t\"scale\":[\n\t\t\t\t0.17058558762073517,\n\t\t\t\t0.17058558762073517,\n\t\t\t\t0.17058558762073517\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.6291208267211914,\n\t\t\t\t0.028948593884706497,\n\t\t\t\t1.6885590553283691\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":12,\n\t\t\t\"name\":\"T-lightholder-3\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.039623700082302094,\n\t\t\t\t0.039623696357011795,\n\t\t\t\t0.039623700082302094\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.780541181564331,\n\t\t\t\t1.3029799461364746,\n\t\t\t\t2.9569900035858154\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":13,\n\t\t\t\"name\":\"T-lightholder-2\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.0486246719956398,\n\t\t\t\t0.04862466
4545059204,\n\t\t\t\t0.0486246719956398\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.780541181564331,\n\t\t\t\t1.3028395175933838,\n\t\t\t\t3.057652235031128\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":14,\n\t\t\t\"name\":\"T-lightholder-1\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.0486246719956398,\n\t\t\t\t0.048624664545059204,\n\t\t\t\t0.0486246719956398\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.780541181564331,\n\t\t\t\t1.3027700185775757,\n\t\t\t\t3.2538845539093018\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":15,\n\t\t\t\"name\":\"3D-Artists-Magazine\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.16750279068946838,\n\t\t\t\t0,\n\t\t\t\t0.9858716130256653\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.11785215884447098,\n\t\t\t\t0.11785216629505157,\n\t\t\t\t0.1425195038318634\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5425272583961487,\n\t\t\t\t0.46624672412872314,\n\t\t\t\t2.958265542984009\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":16,\n\t\t\t\"name\":\"Standing-Lamp-Shade-Inner\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.6291208267211914,\n\t\t\t\t1.4259439706802368,\n\t\t\t\t1.6885590553283691\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":17,\n\t\t\t\"name\":\"Fruit-Bowl\",\n\t\t\t\"scale\":[\n\t\t\t\t0.0804700255393982,\n\t\t\t\t0.0804700255393982,\n\t\t\t\t0.0804700255393982\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.25288286805152893,\n\t\t\t\t0.4664691686630249,\n\t\t\t\t2.9443252086639404\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":18,\n\t\t\t\"name\":\"LOVE-Stand\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.0073470757342875,\n\t\t\t\t0.007347074802964926,\n\t\t\t\t0.0073470757342875\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2202749252319336,\n\t\t\t\t0.6
724759936332703,\n\t\t\t\t4.0033159255981445\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":19,\n\t\t\t\"name\":\"LOVE-Base\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.14449827373027802,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.04091236740350723\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2202749252319336,\n\t\t\t\t0.6763082146644592,\n\t\t\t\t4.0033159255981445\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":20,\n\t\t\t\"name\":\"Plane.078\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2156827449798584,\n\t\t\t\t0.7574570775032043,\n\t\t\t\t3.9275951385498047\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":21,\n\t\t\t\"name\":\"Plane.077\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2156827449798584,\n\t\t\t\t0.7539929747581482,\n\t\t\t\t3.971747398376465\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":22,\n\t\t\t\"name\":\"LOVE-Letter-Back-O\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2156825065612793,\n\t\t\t\t0.7626820206642151,\n\t\t\t\t4.010139465332031\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":23,\n\t\t\t\"name\":\"LOVE-Letter-Back-L\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2156825065612793,\n\t\t\
t\t0.7714517712593079,\n\t\t\t\t4.0644025802612305\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":24,\n\t\t\t\"name\":\"LOVE-Letter-Back-E\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4947656989097595,\n\t\t\t\t0.4947656989097595,\n\t\t\t\t-0.5051801204681396,\n\t\t\t\t0.5051800012588501\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.207442045211792,\n\t\t\t\t0.76943439245224,\n\t\t\t\t3.9362401962280273\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":25,\n\t\t\t\"name\":\"LOVE-Letter-Back-V\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.209141492843628,\n\t\t\t\t0.8162298798561096,\n\t\t\t\t4.0639543533325195\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":26,\n\t\t\t\"name\":\"LOVE-Letter-O\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.206592321395874,\n\t\t\t\t0.7743820548057556,\n\t\t\t\t4.017569541931152\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":27,\n\t\t\t\"name\":\"LOVE-Letter-L\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162,\n\t\t\t\t0.09015031903982162\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2090871334075928,\n\t\t\t\t0.810184895992279,\n\t\t\t\t4.076678276062012\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":28,\n\t\t\t\"name\":\"Photo-frame-pic-BR\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5026351809501648,\n\t\t\t\t0.497350811958313,\n\t\t\t\t-0.50263512134552,\n\t\t\t\t0.4973509907722473\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4332211017608643,\n\t\t\t\t1.5313
353538513184,\n\t\t\t\t3.834543228149414\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":29,\n\t\t\t\"name\":\"Photo-frame-BR\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5026351809501648,\n\t\t\t\t0.497350811958313,\n\t\t\t\t-0.50263512134552,\n\t\t\t\t0.4973509907722473\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.434985876083374,\n\t\t\t\t1.530707836151123,\n\t\t\t\t3.834543228149414\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":30,\n\t\t\t\"name\":\"Photo-frame-BL\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5026351809501648,\n\t\t\t\t0.497350811958313,\n\t\t\t\t-0.50263512134552,\n\t\t\t\t0.4973509907722473\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.434985876083374,\n\t\t\t\t1.530707836151123,\n\t\t\t\t4.0659613609313965\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":31,\n\t\t\t\"name\":\"Photo-frame-pic-BL\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5026351809501648,\n\t\t\t\t0.497350811958313,\n\t\t\t\t-0.50263512134552,\n\t\t\t\t0.4973509907722473\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4332211017608643,\n\t\t\t\t1.5313353538513184,\n\t\t\t\t4.0659613609313965\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":32,\n\t\t\t\"name\":\"Photo-frame-pic-TL\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5042079091072083,\n\t\t\t\t0.4957563281059265,\n\t\t\t\t-0.504207968711853,\n\t\t\t\t0.49575650691986084\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4322803020477295,\n\t\t\t\t1.8389313220977783,\n\t\t\t\t4.0659613609313965\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":33,\n\t\t\t\"name\":\"Photo-frame-TL\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5042079091072083,\n\t\t\t\t0.4957563281059265,\n\t\t\t\t-0.504207968711853,\n\t\t\t\t0.49575650691986084\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.434049129486084,\n\t\t\t\t1.8383150100708008,\n\t\t\t\t4.0659613609313965\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":34,\n\t\t\t\"name\":\"Cushion.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.5113464593887329,\n\t\t\t\t0.6619700193405151,\n\t\t\t\t-0.22544872760772705,\n\t\t\t\t0.49949315190315247\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.88
54628801345825,\n\t\t\t\t0.637373685836792,\n\t\t\t\t4.625072002410889\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":35,\n\t\t\t\"name\":\"Ceiling-light-fitting\",\n\t\t\t\"translation\":[\n\t\t\t\t0.3832467794418335,\n\t\t\t\t3.173314332962036,\n\t\t\t\t2.8927838802337646\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":36,\n\t\t\t\"name\":\"Ceiling-light-bulb-fitting\",\n\t\t\t\"translation\":[\n\t\t\t\t0.3832467794418335,\n\t\t\t\t2.7197864055633545,\n\t\t\t\t2.8927838802337646\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":37,\n\t\t\t\"name\":\"Ceiling-light-cord\",\n\t\t\t\"translation\":[\n\t\t\t\t0.3832467794418335,\n\t\t\t\t2.7294976711273193,\n\t\t\t\t2.8927838802337646\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":38,\n\t\t\t\"name\":\"Ceiling-light-shade-wire\",\n\t\t\t\"scale\":[\n\t\t\t\t1.1797983646392822,\n\t\t\t\t1.0473384857177734,\n\t\t\t\t1.1797983646392822\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3832467794418335,\n\t\t\t\t2.7020483016967773,\n\t\t\t\t2.8927838802337646\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":39,\n\t\t\t\"name\":\"Ceiling-light-Shade\",\n\t\t\t\"scale\":[\n\t\t\t\t1.1797983646392822,\n\t\t\t\t1.0473384857177734,\n\t\t\t\t1.1797983646392822\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3832467794418335,\n\t\t\t\t2.410550594329834,\n\t\t\t\t2.8927838802337646\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":40,\n\t\t\t\"name\":\"Photo-frame-TR\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5042079091072083,\n\t\t\t\t0.4957563281059265,\n\t\t\t\t-0.504207968711853,\n\t\t\t\t0.49575650691986084\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.434049129486084,\n\t\t\t\t1.8383150100708008,\n\t\t\t\t3.834543228149414\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":41,\n\t\t\t\"name\":\"Book1\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.054181575775146484,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9985311031341553\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0
.9334782361984253,\n\t\t\t\t4.177110195159912\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":42,\n\t\t\t\"name\":\"Book5\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.06546599417924881,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9978548884391785\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.9333921074867249,\n\t\t\t\t4.08355712890625\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":43,\n\t\t\t\"name\":\"Book2\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.059965748339891434,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9982004761695862\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490552850067616\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.279275417327881,\n\t\t\t\t0.9342971444129944,\n\t\t\t\t4.148632049560547\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":44,\n\t\t\t\"name\":\"Book3\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.06338035315275192,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9979895353317261\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613074570894241,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.9331662654876709,\n\t\t\t\t4.129520893096924\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":45,\n\t\t\t\"name\":\"Book6\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.10614249855279922,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9943510293960571\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.3038711547851562,\n\t\t\t\t0.9338763356208801,\n\t\t\t\t4.05584192276001\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":46,\n\t\t\t\"name\":\"Book16\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.030540894716978073,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9995335340499878\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613071590662003,\n\t\t\t\t0.01249055564403534\n
\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.663068413734436,\n\t\t\t\t3.724823474884033\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":47,\n\t\t\t\"name\":\"Book17\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.02232883684337139,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9997506737709045\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613070845603943,\n\t\t\t\t0.012490556575357914\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.6628230810165405,\n\t\t\t\t3.6945221424102783\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":48,\n\t\t\t\"name\":\"Book15\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.04502567648887634,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9989858269691467\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490552850067616\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731122970581055,\n\t\t\t\t0.6633697152137756,\n\t\t\t\t3.747779369354248\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":49,\n\t\t\t\"name\":\"Book18\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.008273575454950333,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9999657869338989\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.6619119048118591,\n\t\t\t\t3.6714670658111572\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":50,\n\t\t\t\"name\":\"Book4\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.06292934715747833,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9980179667472839\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613073080778122,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2639031410217285,\n\t\t\t\t0.9350541830062866,\n\t\t\t\t4.10331392288208\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":51,\n\t\t\t\"name\":\"Book8\",\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\
n\t\t\t\t-2.2657485008239746,\n\t\t\t\t0.9318746328353882,\n\t\t\t\t3.8247056007385254\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":52,\n\t\t\t\"name\":\"Book9\",\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.9318746328353882,\n\t\t\t\t3.800419569015503\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":53,\n\t\t\t\"name\":\"Book10\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.05672706663608551,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.998389720916748\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.285423517227173,\n\t\t\t\t0.9329015016555786,\n\t\t\t\t3.7837345600128174\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":54,\n\t\t\t\"name\":\"Book11\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.04585462808609009,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9989481568336487\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2817344665527344,\n\t\t\t\t0.932712197303772,\n\t\t\t\t3.764090061187744\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":55,\n\t\t\t\"name\":\"Book7\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.01554703526198864,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9998792409896851\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.01249055378139019\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.292801856994629,\n\t\t\t\t0.9318746328353882,\n\t\t\t\t3.8652091026306152\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":56,\n\t\t\t\"name\":\"Book12\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.026683012023568153,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9996439814567566\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613073080778122,\n\t\t\t\t0.012490551918745041\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2878830432891846,\n\t\t
\t\t0.9317463636398315,\n\t\t\t\t3.730381488800049\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":57,\n\t\t\t\"name\":\"Book13\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.020824573934078217,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9997831583023071\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613074570894241,\n\t\t\t\t0.01249055564403534\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.27681565284729,\n\t\t\t\t0.9321562647819519,\n\t\t\t\t3.702394962310791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":58,\n\t\t\t\"name\":\"Book14\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.017526455223560333,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.9998463988304138\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.08613072335720062,\n\t\t\t\t0.012490554712712765\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2731266021728516,\n\t\t\t\t0.9320932626724243,\n\t\t\t\t3.6755876541137695\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":59,\n\t\t\t\"name\":\"Chest-of-Drawers-Base\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t-0.7071068286895752,\n\t\t\t\t5.338509012631221e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.832046627998352,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.154634952545166,\n\t\t\t\t0.14899420738220215,\n\t\t\t\t1.7385165691375732\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":60,\n\t\t\t\"name\":\"Chest-of-Drawers-Sides\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-0.7071065902709961,\n\t\t\t\t0.7071070075035095\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.242722511291504,\n\t\t\t\t0.5351095199584961,\n\t\t\t\t1.7384898662567139\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":61,\n\t\t\t\"name\":\"Chest-of-Drawers-Top\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4999999701976776,\n\t\t\t\t0.4999999701976776,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.83204656
83937073,\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.242722511291504,\n\t\t\t\t1.184232473373413,\n\t\t\t\t1.7385165691375732\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":62,\n\t\t\t\"name\":\"Chest-of-Drawers-Handles\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.347294569015503,\n\t\t\t\t1.0520886182785034,\n\t\t\t\t1.7364802360534668\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":63,\n\t\t\t\"name\":\"Chest-of-Drawers-Drawers-Top\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.3677444458007812,\n\t\t\t\t0.7001770734786987,\n\t\t\t\t1.7385165691375732\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":64,\n\t\t\t\"name\":\"Chest-of-Drawers-Drawers\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.3677444458007812,\n\t\t\t\t0.7815507650375366,\n\t\t\t\t1.7385165691375732\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":65,\n\t\t\t\"name\":\"LCD-TV\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5000002384185791,\n\t\t\t\t0.49999988079071045,\n\t\t\t\t-0.5000001192092896,\n\t\t\t\t0.49999988079071045\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.8813024759292603,\n\t\t\t\t1.8465850353240967,\n\t\t\t\t2.878190755844116\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":66,\n\t\t\t\"name\":\"Plane.042\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t
],\n\t\t\t\"translation\":[\n\t\t\t\t2.6291208267211914,\n\t\t\t\t1.2779653072357178,\n\t\t\t\t1.6885590553283691\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":67,\n\t\t\t\"name\":\"Cushion.003\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4908314347267151,\n\t\t\t\t-0.8651542067527771,\n\t\t\t\t0.09994808584451675,\n\t\t\t\t0.024558179080486298\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.329519033432007,\n\t\t\t\t0.6274178624153137,\n\t\t\t\t2.938812494277954\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":68,\n\t\t\t\"name\":\"Chair-base.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071069478988647,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.832046389579773\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.42162710428237915,\n\t\t\t\t0.26330262422561646,\n\t\t\t\t1.7494981288909912\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":69,\n\t\t\t\"name\":\"Chair-back.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.445726215839386,\n\t\t\t\t0.548933744430542,\n\t\t\t\t0.44572603702545166,\n\t\t\t\t0.5489336848258972\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.4248037040233612,\n\t\t\t\t0.34699004888534546,\n\t\t\t\t1.4029626846313477\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":70,\n\t\t\t\"name\":\"Chair-arms.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071069478988647,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.832046389579773\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.4243548512458801,\n\t\t\t\t0.05198943614959717,\n\t\t\t\t1.7494981288909912\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":71,\n\t\t\t\"name\":\"Cushion.002\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4925888776779175,\n\t\t\t\t0.009835230186581612,\n\t\t\t\t0.05611172690987587,\n\t\t\t\t0.8683956265449524\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t
\t0.5686255097389221,\n\t\t\t\t0.6308478116989136,\n\t\t\t\t1.5467344522476196\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":72,\n\t\t\t\"name\":\"Standing-Lamp-Shade-Outer\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.6291208267211914,\n\t\t\t\t1.4259439706802368,\n\t\t\t\t1.6885590553283691\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":73,\n\t\t\t\"name\":\"Plane.040\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t1.9949604279645428e-07\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.832046627998352,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.1752309799194336,\n\t\t\t\t0.06389200687408447,\n\t\t\t\t4.070070743560791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":74,\n\t\t\t\"name\":\"Plane.039\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5000001192092896,\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.282989740371704,\n\t\t\t\t0.9208285808563232,\n\t\t\t\t4.070070743560791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":75,\n\t\t\t\"name\":\"Book-Shelf\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071068286895752,\n\t\t\t\t0,\n\t\t\t\t0.7071067094802856\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320465683937073\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.282989740371704,\n\t\t\t\t0.8074179291725159,\n\t\t\t\t4.070070743560791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":76,\n\t\t\t\"name\":\"Coffee-Table-Handle1\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t7.549791547489804e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.53383994102478
03,\n\t\t\t\t0.06817221641540527,\n\t\t\t\t2.934676170349121\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":77,\n\t\t\t\"name\":\"Coffee-Table-Drawer\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t7.549791547489804e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.055516839027404785,\n\t\t\t\t2.9537901878356934\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":78,\n\t\t\t\"name\":\"Coffee-Table-Suround\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.24178721010684967,\n\t\t\t\t0.24178719520568848,\n\t\t\t\t0.24178719520568848\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.3593718409538269,\n\t\t\t\t2.957340955734253\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":79,\n\t\t\t\"name\":\"Plug-Socket3\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4760635793209076,\n\t\t\t\t-0.5228416919708252,\n\t\t\t\t0.4760635495185852,\n\t\t\t\t0.5228418707847595\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320465087890625,\n\t\t\t\t0.8320465087890625\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t3.0061659812927246,\n\t\t\t\t0.34290236234664917,\n\t\t\t\t1.6741163730621338\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":80,\n\t\t\t\"name\":\"Big-Pic-frame-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5075001120567322,\n\t\t\t\t-0.49238571524620056,\n\t\t\t\t0.507500171661377,\n\t\t\t\t0.492385596036911\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9721333980560303,\n\t\t\t\t1.7000353336334229,\n\t\t\t\t2.652094841003418\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":81,\n\t\t\t\"name\":\"Big-Pic-frame-pic-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5075001120567322,\n\t\t\t\t-0.49238571524620056,\n\t\t\t\t0.507500171661377,\n\t\t\t\t0.492385596036911\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9676499366760254,\n\t\t\t\t1.699784
0404510498,\n\t\t\t\t2.652094841003418\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":79,\n\t\t\t\"name\":\"Plug-Socket\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.4760635495185852,\n\t\t\t\t0.52284175157547,\n\t\t\t\t-0.4760635793209076,\n\t\t\t\t0.5228416919708252\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320465087890625,\n\t\t\t\t0.8320465087890625\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4398510456085205,\n\t\t\t\t0.34290236234664917,\n\t\t\t\t3.8262343406677246\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":82,\n\t\t\t\"name\":\"Big-Picture-frame-glass-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5053536295890808,\n\t\t\t\t0.4945884346961975,\n\t\t\t\t-0.5053535103797913,\n\t\t\t\t0.49458855390548706\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.415652275085449,\n\t\t\t\t1.806846022605896,\n\t\t\t\t1.741856575012207\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":83,\n\t\t\t\"name\":\"Big-Picture-frame-pic-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5053536295890808,\n\t\t\t\t0.4945884346961975,\n\t\t\t\t-0.5053535103797913,\n\t\t\t\t0.49458855390548706\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4186534881591797,\n\t\t\t\t1.8066214323043823,\n\t\t\t\t1.741856575012207\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":84,\n\t\t\t\"name\":\"Big-Picture-frame-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5053536295890808,\n\t\t\t\t0.4945884346961975,\n\t\t\t\t-0.5053535103797913,\n\t\t\t\t0.49458855390548706\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.422860622406006,\n\t\t\t\t1.8050754070281982,\n\t\t\t\t1.741856575012207\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":85,\n\t\t\t\"name\":\"Blind-cord-knob-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.2473352551460266,\n\t\t\t\t0,\n\t\t\t\t0.9689300656318665\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4115173816680908,\n\t\t\t\t2.380880117416382,\n\t\t\t\t0.7399067878723145\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":86,\n\t\t\t\"name\":\"Blind-cord-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.2473352551460266,\n\t
\t\t\t0,\n\t\t\t\t0.9689300656318665\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4115173816680908,\n\t\t\t\t2.398815870285034,\n\t\t\t\t0.7399067878723145\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":87,\n\t\t\t\"name\":\"Blind-cord-Fixing-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6851370334625244,\n\t\t\t\t-0.1748923659324646,\n\t\t\t\t0.17489241063594818,\n\t\t\t\t0.6851369142532349\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.410313367843628,\n\t\t\t\t2.4108941555023193,\n\t\t\t\t0.7421114444732666\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":88,\n\t\t\t\"name\":\"Blind-Roll-Ends-Fixings-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.2473352551460266,\n\t\t\t\t0,\n\t\t\t\t0.9689300656318665\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4864286184310913,\n\t\t\t\t2.890126943588257,\n\t\t\t\t0.6014595031738281\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":89,\n\t\t\t\"name\":\"Blind-Roll-Ends-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3607974946498871,\n\t\t\t\t-0.608132541179657,\n\t\t\t\t0.608132541179657,\n\t\t\t\t0.3607974946498871\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.3991007804870605,\n\t\t\t\t2.7530863285064697,\n\t\t\t\t0.7626429796218872\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":90,\n\t\t\t\"name\":\"Blind-drop-bar-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3607974946498871,\n\t\t\t\t-0.608132541179657,\n\t\t\t\t0.608132541179657,\n\t\t\t\t0.3607974946498871\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4121025800704956,\n\t\t\t\t2.6807801723480225,\n\t\t\t\t0.7388352155685425\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":91,\n\t\t\t\"name\":\"Blind-drop-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3607974946498871,\n\t\t\t\t-0.608132541179657,\n\t\t\t\t0.608132541179657,\n\t\t\t\t0.3607974946498871\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.4125889539718628,\n\t\t\t\t2.7497336864471436,\n\t\t\t\t0.7379449605941772\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":92,\n\t\t\t\"name\":\"Blind-roll-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3607974946498871,\n\t\t\t\t-0.60813
2541179657,\n\t\t\t\t0.608132541179657,\n\t\t\t\t0.3607974946498871\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t1.3991007804870605,\n\t\t\t\t2.7530863285064697,\n\t\t\t\t0.7626429796218872\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":93,\n\t\t\t\"name\":\"Blind-roll-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6077407598495483,\n\t\t\t\t-0.3614570200443268,\n\t\t\t\t0.36145704984664917,\n\t\t\t\t0.6077407598495483\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9705768823623657,\n\t\t\t\t2.7530863285064697,\n\t\t\t\t0.7683078050613403\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":94,\n\t\t\t\"name\":\"Blind-drop-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6077407598495483,\n\t\t\t\t-0.3614570200443268,\n\t\t\t\t0.36145704984664917,\n\t\t\t\t0.6077407598495483\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9840114116668701,\n\t\t\t\t2.7497336864471436,\n\t\t\t\t0.7435802221298218\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":95,\n\t\t\t\"name\":\"Blind-drop-bar-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6077407598495483,\n\t\t\t\t-0.3614570200443268,\n\t\t\t\t0.36145704984664917,\n\t\t\t\t0.6077407598495483\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9835271835327148,\n\t\t\t\t2.65942645072937,\n\t\t\t\t0.744471549987793\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":96,\n\t\t\t\"name\":\"Blind-Roll-Ends-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6077407598495483,\n\t\t\t\t-0.3614570200443268,\n\t\t\t\t0.36145704984664917,\n\t\t\t\t0.6077407598495483\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9705768823623657,\n\t\t\t\t2.7530863285064697,\n\t\t\t\t0.7683078050613403\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":97,\n\t\t\t\"name\":\"Blind-Roll-Ends-Fixings-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.24628375470638275,\n\t\t\t\t0,\n\t\t\t\t0.9691977500915527\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.0586304664611816,\n\t\t\t\t2.890126943588257,\n\t\t\t\t0.6075193881988525\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":98,\n\t\t\t\"name\":\"Blind-cord-Fixing-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\
t0.685326337814331,\n\t\t\t\t0.17414893209934235,\n\t\t\t\t-0.17414894700050354,\n\t\t\t\t0.685326337814331\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9817452430725098,\n\t\t\t\t2.389540433883667,\n\t\t\t\t0.7477517127990723\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":99,\n\t\t\t\"name\":\"Blind-cord-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.24628375470638275,\n\t\t\t\t0,\n\t\t\t\t0.9691977500915527\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9829443693161011,\n\t\t\t\t2.377462148666382,\n\t\t\t\t0.74554443359375\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":100,\n\t\t\t\"name\":\"Blind-cord-knob-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.24628375470638275,\n\t\t\t\t0,\n\t\t\t\t0.9691977500915527\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9829443693161011,\n\t\t\t\t2.3595263957977295,\n\t\t\t\t0.74554443359375\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":101,\n\t\t\t\"name\":\"Blind-cord-knob-Middle\",\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.4515154361724854,\n\t\t\t\t0.46897247433662415\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":102,\n\t\t\t\"name\":\"Blind-cord-Middle\",\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.4694507122039795,\n\t\t\t\t0.46897247433662415\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":103,\n\t\t\t\"name\":\"Blind-cord-Fixing-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.4807980060577393,\n\t\t\t\t0.4714844524860382\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":104,\n\t\t\t\"name\":\"Blind-Roll-Ends-Fixings-Middle\",\n\t\t\t\"translation\":[\n\t\t\t\t0.21488434076309204,\n\t\t\t\t2.890126943588257,\n\t\t\t\t0.3115592300891876\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":105,\n\t\t\t\"name\":\"Blind-Roll-Ends-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t
\t2.7530863285064697,\n\t\t\t\t0.4948785603046417\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":106,\n\t\t\t\"name\":\"Blind-drop-bar-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.7497336864471436,\n\t\t\t\t0.4677514135837555\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":107,\n\t\t\t\"name\":\"Blind-drop-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.7497336864471436,\n\t\t\t\t0.46673718094825745\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":108,\n\t\t\t\"name\":\"Blind-roll-Middle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2154962122440338,\n\t\t\t\t2.7530863285064697,\n\t\t\t\t0.4948785603046417\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":79,\n\t\t\t\"name\":\"Plug-Socket2\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.6732555627822876,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.739409863948822\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320465087890625,\n\t\t\t\t0.8320465087890625\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.6679747104644775,\n\t\t\t\t0.28773653507232666,\n\t\t\t\t1.3029168844223022\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":109,\n\t\t\t\"name\":\"Chair-arms\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.1286563873291016,\n\t\t\t\t0.05198943614959717,\n\t\t\t\t2.830965518951416\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":110,\n\t\t\t\"name\":\"Chair-back\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.6303520202636719,\n\t\t\t\t0.7763094305992126\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.475191831588745,\n\t\t\t\t0.346990
04888534546,\n\t\t\t\t2.8267672061920166\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":111,\n\t\t\t\"name\":\"Chair-base\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.1286563873291016,\n\t\t\t\t0.26330262422561646,\n\t\t\t\t2.828237771987915\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":112,\n\t\t\t\"name\":\"Sofa-arms\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7071066498756409,\n\t\t\t\t0,\n\t\t\t\t0.7071069478988647\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.832046389579773\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3138567805290222,\n\t\t\t\t0.053914427757263184,\n\t\t\t\t4.4244704246521\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":113,\n\t\t\t\"name\":\"Sofa-back\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.44572609663009644,\n\t\t\t\t-0.548933744430542,\n\t\t\t\t0.4457261562347412,\n\t\t\t\t0.5489336848258972\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320465683937073,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.31503480672836304,\n\t\t\t\t0.3489152193069458,\n\t\t\t\t4.771006107330322\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":114,\n\t\t\t\"name\":\"Sofa-base\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7071066498756409,\n\t\t\t\t0,\n\t\t\t\t0.7071069478988647\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.832046389579773\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.06952560693025589,\n\t\t\t\t0.265227735042572,\n\t\t\t\t4.4244704246521\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":115,\n\t\t\t\"name\":\"Cushion\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.1643114686012268,\n\t\t\t\t0.848813533782959,\n\t\t\t\t-0.4677543044090271,\n\t\t\t\t0.18363890051841736\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.2681952118873596,\n\t\t\t\t0.6439410448074341,\n\t\t\t\t4.570359706878662\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":116,\n\
t\t\t\"name\":\"Rug\",\n\t\t\t\"translation\":[\n\t\t\t\t0.5366979837417603,\n\t\t\t\t0.02943408489227295,\n\t\t\t\t2.9517576694488525\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":117,\n\t\t\t\"name\":\"Coffee-Table-drawer2\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.055649399757385254,\n\t\t\t\t2.9589078426361084\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":118,\n\t\t\t\"name\":\"Coffee-Table-Handle2\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.06817221641540527,\n\t\t\t\t2.9793577194213867\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":119,\n\t\t\t\"name\":\"Coffee-Table-Legs\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7071066498756409,\n\t\t\t\t0,\n\t\t\t\t0.7071069478988647\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.832046389579773,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.832046389579773\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.05169498920440674,\n\t\t\t\t2.956437110900879\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":120,\n\t\t\t\"name\":\"Coffee-Table-uprights\",\n\t\t\t\"scale\":[\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177,\n\t\t\t\t0.8320464491844177\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.5331442356109619,\n\t\t\t\t0.055435530841350555,\n\t\t\t\t2.956437110900879\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":121,\n\t\t\t\"name\":\"Rad-Floor-Cover-Left\",\n\t\t\t\"translation\":[\n\t\t\t\t2.9334805011749268,\n\t\t\t\t0.03835034370422363,\n\t\t\t\t3.1527419090270996\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":122,\n\t\t\t\"name\":\"Rad-Floor-Cover-Right\",\n\t\t\t\"translation\":[\n\t\t\t\t2.933389663696289,\n\t\t\t\t0.03835034370422363,\n\t\t\t\t2.0671274662017822\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":123,\n\t\t\t\"name\":\"Rad-Knob\",\n\t\t\t\"translation\":[\n
\t\t\t\t2.9334805011749268,\n\t\t\t\t0.25281208753585815,\n\t\t\t\t3.1524460315704346\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":124,\n\t\t\t\"name\":\"Rad-Nut-End-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334259033203125,\n\t\t\t\t0.6420339941978455,\n\t\t\t\t3.103543281555176\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":125,\n\t\t\t\"name\":\"Rad-Nut-End-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409,\n\t\t\t\t-0.70710688829422,\n\t\t\t\t8.432163411953297e-09\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334259033203125,\n\t\t\t\t0.6420339941978455,\n\t\t\t\t2.1122641563415527\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":126,\n\t\t\t\"name\":\"Rad-panel-End-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334259033203125,\n\t\t\t\t0.40119630098342896,\n\t\t\t\t3.0704586505889893\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":127,\n\t\t\t\"name\":\"Rad-panel-End-Right\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409,\n\t\t\t\t-0.70710688829422,\n\t\t\t\t8.432163411953297e-09\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334259033203125,\n\t\t\t\t0.40119630098342896,\n\t\t\t\t2.1455821990966797\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":128,\n\t\t\t\"name\":\"Rad-panel-Mid\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334259033203125,\n\t\t\t\t0.40119630098342896,\n\t\t\t\t3.004451036453247\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":129,\n\t\t\t\"name\":\"Rad-Pipe-ball-Left\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t1.1924880638503055e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.9334805011749268,\n\t\t\t\t0.16174757480621338,\n\t\t\t\t3.1524460315704346\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":130,\n
\t\t\t\"name\":\"Rad-Pipework-Left\",\n\t\t\t\"translation\":[\n\t\t\t\t2.934143543243408,\n\t\t\t\t0.24655157327651978,\n\t\t\t\t3.1524460315704346\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":131,\n\t\t\t\"name\":\"Rad-Pipework-Right\",\n\t\t\t\"translation\":[\n\t\t\t\t2.933389663696289,\n\t\t\t\t0.24655157327651978,\n\t\t\t\t2.0923078060150146\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":132,\n\t\t\t\"name\":\"Tea-cup\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.327330619096756,\n\t\t\t\t0,\n\t\t\t\t0.9449098706245422\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.8274208903312683,\n\t\t\t\t0.4649931490421295,\n\t\t\t\t3.107694625854492\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":133,\n\t\t\t\"name\":\"Tea-cup-saucer\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.2557510733604431,\n\t\t\t\t0,\n\t\t\t\t0.9667426943778992\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.8274208903312683,\n\t\t\t\t0.4618278443813324,\n\t\t\t\t3.107694625854492\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":134,\n\t\t\t\"name\":\"Tea-spoon2\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.006642984226346016,\n\t\t\t\t-0.2804909348487854,\n\t\t\t\t0.022725779563188553,\n\t\t\t\t0.9595646262168884\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.8394526243209839,\n\t\t\t\t0.46989956498146057,\n\t\t\t\t3.162907838821411\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":135,\n\t\t\t\"name\":\"Teapot-base\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.06640083342790604,\n\t\t\t\t0,\n\t\t\t\t0.9977930188179016\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.9331851601600647,\n\t\t\t\t0.46185222268104553,\n\t\t\t\t2.9529385566711426\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":136,\n\t\t\t\"name\":\"Teapot-lid\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.06640083342790604,\n\t\t\t\t0,\n\t\t\t\t0.9977930188179016\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.9331851601600647,\n\t\t\t\t0.46100959181785583,\n\t\t\t\t2.9529385566711426\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":137,\n\t\t\t\"name\":\"Radio-back\",\n\t\t\t\"rotation\":[\n\t\
t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t4.17172027445626e-15\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-1.3006994724273682,\n\t\t\t\t-1.3006997108459473,\n\t\t\t\t-23.850276947021484\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05415674299001694,\n\t\t\t\t-0.5765234231948853,\n\t\t\t\t-29.3918399810791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":138,\n\t\t\t\"name\":\"Radio-BUSH-logo\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2.980232594040899e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.9381040930747986,\n\t\t\t\t0.19397583603858948,\n\t\t\t\t-25.337797164916992\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":139,\n\t\t\t\"name\":\"Radio-dial-back\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t4.972420697413327e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-0.7491905689239502,\n\t\t\t\t-0.7491905689239502,\n\t\t\t\t-13.737532615661621\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.46280914545059204,\n\t\t\t\t-0.547615647315979,\n\t\t\t\t-26.8626651763916\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":140,\n\t\t\t\"name\":\"Radio-dial-edge\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2.980232594040899e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.46280914545059204,\n\t\t\t\t-0.547615647315979,\n\t\t\t\t-26.8626651763916\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":141,\n\t\t\t\"name\":\"Radio-dial-glass\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-1,\n\t\t\t\t7.4505766001209395e-09\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.4636607766151428,\n\t\t\t\t0.20150700211524963,\n\t\t\t\t-26.809812545776367\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":142,\n\t\t\t\"name\":\"Radio-feet\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t-0.7071065902709961,\n\t\t\t\t0.7071070671081543,\n\t\t\t\t2.472583560120256e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-0.039950791746377945,\n\t\t\t\t-0.7325574159622192,\n\t\t\t\t-0.2552826702594757\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.9590277075767517,\n\t\t\t\t-0.214940
66715240479,\n\t\t\t\t-52.56930160522461\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":143,\n\t\t\t\"name\":\"Radio-front\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-1,\n\t\t\t\t7.4505766001209395e-09\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05423874780535698,\n\t\t\t\t0.1839851438999176,\n\t\t\t\t-26.346830368041992\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":144,\n\t\t\t\"name\":\"Radio-handle-chrome\",\n\t\t\t\"scale\":[\n\t\t\t\t-0.28543949127197266,\n\t\t\t\t-0.28543949127197266,\n\t\t\t\t-5.233961582183838\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.04926714673638344,\n\t\t\t\t-0.2073417454957962,\n\t\t\t\t-5.458431243896484\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":145,\n\t\t\t\"name\":\"Radio-inner\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t-1,\n\t\t\t\t1.1656565490625326e-08\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-1.2783485651016235,\n\t\t\t\t-1.278348684310913,\n\t\t\t\t-23.4404354095459\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05270620435476303,\n\t\t\t\t0.11164035648107529,\n\t\t\t\t-29.391843795776367\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":146,\n\t\t\t\"name\":\"Radio-Knob\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2.980232594040899e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.46366173028945923,\n\t\t\t\t0.18398705124855042,\n\t\t\t\t-26.809812545776367\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":147,\n\t\t\t\"name\":\"Radio-shell\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t4.17172027445626e-15\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-1.3006994724273682,\n\t\t\t\t-1.3006997108459473,\n\t\t\t\t-23.850276947021484\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05415578931570053,\n\t\t\t\t0.13842526078224182,\n\t\t\t\t-29.391843795776367\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":148,\n\t\t\t\"name\":\"Radio-trim-Back\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t4.17172027445626e-15\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-1.3165284395217896,\n\t\t\t\t-
1.3075554370880127,\n\t\t\t\t-23.975990295410156\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.054157696664333344,\n\t\t\t\t-0.9042078256607056,\n\t\t\t\t-29.3918399810791\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":149,\n\t\t\t\"name\":\"Radio-trim-front\",\n\t\t\t\"rotation\":[\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t1,\n\t\t\t\t2.980232594040899e-08\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-0.05415865033864975,\n\t\t\t\t-0.2120414674282074,\n\t\t\t\t-29.391841888427734\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"children\":[\n\t\t\t\t141,\n\t\t\t\t142,\n\t\t\t\t143,\n\t\t\t\t144,\n\t\t\t\t145,\n\t\t\t\t146,\n\t\t\t\t147,\n\t\t\t\t148,\n\t\t\t\t149,\n\t\t\t\t150,\n\t\t\t\t151,\n\t\t\t\t152,\n\t\t\t\t153\n\t\t\t],\n\t\t\t\"mesh\":150,\n\t\t\t\"name\":\"Radio-handle\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.28421643376350403,\n\t\t\t\t-0.6474727392196655,\n\t\t\t\t0.6474728584289551,\n\t\t\t\t0.28421640396118164\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t-0.07415362447500229,\n\t\t\t\t-0.07415362447500229,\n\t\t\t\t-0.004044043831527233\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.2290596961975098,\n\t\t\t\t1.4213799238204956,\n\t\t\t\t1.5119402408599854\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":151,\n\t\t\t\"name\":\"Photo-frame-pic-BR.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5026351809501648,\n\t\t\t\t0.497350811958313,\n\t\t\t\t-0.50263512134552,\n\t\t\t\t0.4973509907722473\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.4332211017608643,\n\t\t\t\t1.8389573097229004,\n\t\t\t\t3.834543228149414\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"camera\":0,\n\t\t\t\"name\":\"Camera\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.10218629986047745,\n\t\t\t\t0.2902959883213043,\n\t\t\t\t0.031194215640425682,\n\t\t\t\t0.9509537816047668\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.8709295988082886,\n\t\t\t\t0.8709295392036438,\n\t\t\t\t0.8709296584129333\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.679414749145508,\n\t\t\t\t1.5535809993743896,\n\t\t\t\t6.165511608123779\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":152,\n\t\t\t\"name\":
\"Circle.003\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.8469732999801636,\n\t\t\t\t0.14883899688720703,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":153,\n\t\t\t\"name\":\"Fireplace-fire-main-Tray\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.845271110534668,\n\t\t\t\t0.11596381664276123,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":154,\n\t\t\t\"name\":\"Fireplace-fire-main-base\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.829506278038025,\n\t\t\t\t0.1039959192276001,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":155,\n\t\t\t\"name\":\"Fireplace-fire-Grill-tops\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.9010403156280518,\n\t\t\t\t0.31762272119522095,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":156,\n\t\t\t\"name\":\"Plane.019\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.9010403156280518,\n\t\t\t\t0.3052085041999817,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":157,\n\t\t\t\"name\":\"Fireplace-fire-grate\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.845271110534668,\n\t\t\t\t0.1813194751739502,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":158,\n\t\t\t\"name\":\"Fireplace-fire-front-Grill\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.845271110534668,\n\t\t\t\t0.31016409397125244,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":159,\n\t\t\t\"name\":\"Fireplace-fire-back-plane\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.8852759599685
67,\n\t\t\t\t0.6804149746894836,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":160,\n\t\t\t\"name\":\"Fireplace-fire-main-Arch\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.5,\n\t\t\t\t-0.5,\n\t\t\t\t0.5,\n\t\t\t\t0.5\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.885275959968567,\n\t\t\t\t0.6804149746894836,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":161,\n\t\t\t\"name\":\"Fireplace-Suround-Scrolls\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.70710688829422,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0.7071066498756409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.7559783458709717,\n\t\t\t\t1.2352337837219238,\n\t\t\t\t2.876844644546509\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":162,\n\t\t\t\"name\":\"Plane.015\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.825409173965454,\n\t\t\t\t-1.0686419010162354,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":163,\n\t\t\t\"name\":\"Fireplace-Stone-Half\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.825409173965454,\n\t\t\t\t-1.0485877990722656,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":164,\n\t\t\t\"name\":\"Fireplace-Suround-Top\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.825409173965454,\n\t\t\t\t0.18601608276367188,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":165,\n\t\t\t\"name\":\"Fireplace-Suround-Base\",\n\t\t\t\"translation\":[\n\t\t\t\t-1.825409173965454,\n\t\t\t\t0.09243941307067871,\n\t\t\t\t2.8784449100494385\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":166,\n\t\t\t\"name\":\"Sphere\",\n\t\t\t\"scale\":[\n\t\t\t\t0.02500000037252903,\n\t\t\t\t0.05000000074505806,\n\t\t\t\t0.02500000037252903\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t2.630155324935913,\n\t\t\t\t1.4528168439865112,\n\t\t\t\t1.686143398284912\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":167,\n\t\t\t\"name\":\"Icosphere.005\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3624155521392822,\n\t\t\t\t0.5969444513320923,\n\t\t\t\t0.07239257544279099,\n\t\t\t\t0.7120895981788635\n\t\t\t],\n\t\t\t\"scale\":[\
n\t\t\t\t0.42566344141960144,\n\t\t\t\t0.42566347122192383,\n\t\t\t\t0.4256633520126343\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.19917480647563934,\n\t\t\t\t0.507774293422699,\n\t\t\t\t2.924175500869751\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":168,\n\t\t\t\"name\":\"Icosphere.004\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.08696160465478897,\n\t\t\t\t0.2491137534379959,\n\t\t\t\t0.30457642674446106,\n\t\t\t\t0.915212094783783\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.25405651330947876,\n\t\t\t\t0.25405651330947876,\n\t\t\t\t0.25405651330947876\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2456478327512741,\n\t\t\t\t0.4927559494972229,\n\t\t\t\t2.889678478240967\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":169,\n\t\t\t\"name\":\"Sphere.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.0615028440952301,\n\t\t\t\t-0.03091888129711151,\n\t\t\t\t0.1267399936914444,\n\t\t\t\t0.9895446300506592\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.02835756726562977,\n\t\t\t\t0.02835756726562977,\n\t\t\t\t0.028357569128274918\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.27565765380859375,\n\t\t\t\t0.498931348323822,\n\t\t\t\t2.9445929527282715\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":170,\n\t\t\t\"name\":\"Icosphere.001\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.28945568203926086,\n\t\t\t\t-0.43979963660240173,\n\t\t\t\t0.15163302421569824,\n\t\t\t\t0.8365401029586792\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.10750720649957657,\n\t\t\t\t0.10750721395015717,\n\t\t\t\t0.10750720649957657\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.2463006228208542,\n\t\t\t\t0.48019275069236755,\n\t\t\t\t2.993616819381714\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":171,\n\t\t\t\"name\":\"Icosphere.002\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.3760858476161957,\n\t\t\t\t0.4652629494667053,\n\t\t\t\t0.616178572177887,\n\t\t\t\t0.5122635364532471\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.10750720649957657,\n\t\t\t\t0.10750720649957657,\n\t\t\t\t0.10750720649957657\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.24362239241600037,\n\t\t\t\t0.4800707
697868347,\n\t\t\t\t2.943013906478882\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":172,\n\t\t\t\"name\":\"Sphere.002\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.02287924475967884,\n\t\t\t\t-0.014835351146757603,\n\t\t\t\t0.16830526292324066,\n\t\t\t\t0.9853578209877014\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.018231168389320374,\n\t\t\t\t0.018231168389320374,\n\t\t\t\t0.018231168389320374\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3063569962978363,\n\t\t\t\t0.4899011254310608,\n\t\t\t\t2.9778501987457275\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":173,\n\t\t\t\"name\":\"Torus\",\n\t\t\t\"scale\":[\n\t\t\t\t0.07076700031757355,\n\t\t\t\t0.07076724618673325,\n\t\t\t\t0.07076724618673325\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.7830919027328491,\n\t\t\t\t1.3236432075500488,\n\t\t\t\t3.2523293495178223\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":174,\n\t\t\t\"name\":\"Torus.002\",\n\t\t\t\"scale\":[\n\t\t\t\t0.051515132188797,\n\t\t\t\t0.051515132188797,\n\t\t\t\t0.051515132188797\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.7831077575683594,\n\t\t\t\t1.3130135536193848,\n\t\t\t\t3.0580222606658936\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":175,\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.001130877761170268,\n\t\t\t\t0.033382151275873184,\n\t\t\t\t-0.0008058790699578822,\n\t\t\t\t0.9994418025016785\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.02203996293246746,\n\t\t\t\t0.02203996293246746,\n\t\t\t\t0.02203996293246746\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9765292406082153,\n\t\t\t\t0.13079993426799774,\n\t\t\t\t2.9674112796783447\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":176,\n\t\t\t\"name\":\"Cube.001\",\n\t\t\t\"scale\":[\n\t\t\t\t0.02203996293246746,\n\t\t\t\t0.02203996293246746,\n\t\t\t\t0.04837724566459656\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9504104852676392,\n\t\t\t\t0.23689241707324982,\n\t\t\t\t2.847280502319336\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":177,\n\t\t\t\"name\":\"Cube.002\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.0001098120555980131,\n\t\t\
t\t-0.0047805458307266235,\n\t\t\t\t0.0024924587924033403,\n\t\t\t\t0.9999855756759644\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.010889016091823578,\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-2.0331785678863525,\n\t\t\t\t0.11950372904539108,\n\t\t\t\t2.813229560852051\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":178,\n\t\t\t\"name\":\"Cube.003\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.0008358443737961352,\n\t\t\t\t0.07081835716962814,\n\t\t\t\t-0.005114189814776182,\n\t\t\t\t0.997475802898407\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9475034475326538,\n\t\t\t\t0.11968352645635605,\n\t\t\t\t2.836214780807495\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":179,\n\t\t\t\"name\":\"Cube.004\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.0003359581169206649,\n\t\t\t\t0.002002191497012973,\n\t\t\t\t-0.008173426613211632,\n\t\t\t\t0.9999645352363586\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889016091823578\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9457011222839355,\n\t\t\t\t0.11974502354860306,\n\t\t\t\t2.8892157077789307\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":180,\n\t\t\t\"name\":\"Cube.005\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.03455517068505287,\n\t\t\t\t0.0004973384784534574,\n\t\t\t\t-0.05653561279177666,\n\t\t\t\t0.9978023767471313\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.011178364977240562,\n\t\t\t\t0.01027369312942028,\n\t\t\t\t0.04837724566459656\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9360562562942505,\n\t\t\t\t0.14450013637542725,\n\t\t\t\t2.851992130279541\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":181,\n\t\t\t\"name\":\"Cube.006\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.07428764551877975,\n\t\t\t\t0.032259441912174225,\n\t\t\t\t-0.0026640030555427074,\n\t\t\t\t0.9967113733291626\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.011178364045917988,\n\t
\t\t\t0.010273694060742855,\n\t\t\t\t0.04837724566459656\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9860270023345947,\n\t\t\t\t0.1248173639178276,\n\t\t\t\t2.8209848403930664\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":182,\n\t\t\t\"name\":\"Cube.007\",\n\t\t\t\"scale\":[\n\t\t\t\t0.007836089469492435,\n\t\t\t\t0.008200375363230705,\n\t\t\t\t0.1282462328672409\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9504104852676392,\n\t\t\t\t0.23689241707324982,\n\t\t\t\t2.847280502319336\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":183,\n\t\t\t\"name\":\"Cube.008\",\n\t\t\t\"rotation\":[\n\t\t\t\t-0.0011886717984452844,\n\t\t\t\t0.12032090127468109,\n\t\t\t\t-0.004237604793161154,\n\t\t\t\t0.9927253127098083\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.9777460098266602,\n\t\t\t\t0.11967272311449051,\n\t\t\t\t2.7575795650482178\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":184,\n\t\t\t\"name\":\"Cube.009\",\n\t\t\t\"rotation\":[\n\t\t\t\t0.0018595510628074408,\n\t\t\t\t0.0021360747050493956,\n\t\t\t\t-0.001154154073446989,\n\t\t\t\t0.9999954104423523\n\t\t\t],\n\t\t\t\"scale\":[\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889015160501003,\n\t\t\t\t0.010889016091823578\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t-1.8985399007797241,\n\t\t\t\t0.11956838518381119,\n\t\t\t\t2.9718236923217773\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"mesh\":185,\n\t\t\t\"name\":\"Sphere.003\",\n\t\t\t\"scale\":[\n\t\t\t\t0.02500000037252903,\n\t\t\t\t0.05000000074505806,\n\t\t\t\t0.02500000037252903\n\t\t\t],\n\t\t\t\"translation\":[\n\t\t\t\t0.3987860381603241,\n\t\t\t\t2.3758857250213623,\n\t\t\t\t2.8945016860961914\n\t\t\t]\n\t\t}\n\t],\n\t\"cameras\":[\n\t\t{\n\t\t\t\"name\":\"Camera.001\",\n\t\t\t\"perspective\":{\n\t\t\t\t\"aspectRatio\":1.7777777777777777,\n\t\t\t\t\"yfov\":0.9799147248268127,\n\t\t\t\t\"zfar\":20,\n\t\t\t\t\"znear\":0.10000000149011612\n\t\t\t},\n\t\t\t\"type\":\"persp
ective\"\n\t\t}\n\t],\n\t\"materials\":[\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Painted-white-wood\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Walls\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Floor\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":0\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.17299999296665192\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.8468742966651917,\n\t\t\t\t0.571125328540802\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":10\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Emitter-mid-window\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":
true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t0.7196826934814453,\n\t\t\t\t0.8815838098526001,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":5\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Emitter-Rear\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t]\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"lamp-upright-base\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.18900343775749207,\n\t\t\t\t\"roughnessFactor\":0.09793815016746521\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatFactor\":0.10309278219938278\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"candle-holders\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.054479971528053284,\n\t\t\t\t\t0.027320902794599533,\n\t\t\t\t\t0.01938236877322197,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.46460479497909546\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"magazine\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":1\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.17697593569755554\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t
\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":0.6271477937698364\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"lampshade-inner\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"FruitBowl\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Love-letters-back\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.08021938055753708,\n\t\t\t\t\t0.04666513204574585,\n\t\t\t\t\t0.0331047959625721,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":10\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Love-letters\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\
t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-pica\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":2\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-black\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.04543089121580124,\n\t\t\t\t\t0.04543089121580124,\n\t\t\t\t\t0.04543089121580124,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.40893471240997314,\n\t\t\t\t\"roughnessFactor\":0.1494845449924469\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-picb\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":3\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-pic\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":4\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Cushion1\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":5\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\
":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"light-fitting\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ceiling-shade-wire\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.24619978666305542,\n\t\t\t\t\t0.24620160460472107,\n\t\t\t\t\t0.2462015002965927,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.9037800431251526\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Ceiling-lampshade\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":6\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"book-shader\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":7\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"C-Table-box\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.20000000298023224,\n\t\t
\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"C-Table-handle-metal\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.30288368463516235\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Wide-tv-bevel\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.051249973475933075,\n\t\t\t\t\t0.051249973475933075,\n\t\t\t\t\t0.051249973475933075,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.05670103430747986\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Wide-tv-screen\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.033104557543992996,\n\t\t\t\t\t0.03310480713844299,\n\t\t\t\t\t0.0331047922372818,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.10000000149011612\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Cushion3\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":8\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Chair-sofa-leather\",\n\t\t\t\"pbrMetallicRoughness\":
{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.5088780522346497,\n\t\t\t\t\t0.46778422594070435,\n\t\t\t\t\t0.42869067192077637,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.3865979313850403\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Cushion\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":9\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Lampshader-outer\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":10\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Main-wallsocket\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t
}\n\t\t\t},\n\t\t\t\"name\":\"picframe-pic2\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":11\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Glass-Fake-picframe\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-pic2.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":12\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Blind-string-knob\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.266353964805603,\n\t\t\t\t\t0.21586070954799652,\n\t\t\t\t\t0.17464756965637207,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Blind-String\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\
t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Blind-ends\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Blind-wood-strip\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Blind-material\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.800000011920929\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Carpet\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":13\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"C-Table-box-legs\",\n\t\t\t\"pbrMetallicRou
ghness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.8999999761581421\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Chrome-Dull\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.25\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Rad-Knob-Outer\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.020288454368710518,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.5,\n\t\t\t\t\"roughnessFactor\":0.737113356590271\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Rad-Knob-Centre\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.15895980596542358,\n\t\t\t\t\t0.1589609980583191,\n\t\t\t\t\t0.15896092355251312,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.41237112879753113,\n\t\t\t\t\"roughnessFactor\":0.47250857949256897\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Rad-Panels-Enamel\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t
\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"White-pot.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Chrome-steel-dull\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"TeapotGlass\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0.4638318419456482,\n\t\t\t\t\t0.8004080057144165,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.30000001192092896\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-plastic\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.7990978956222534,\n\t\t\t\t\t0.7529430389404297,\n\t\t\t\t\t0.5457249879837036,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"
bush-logo\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.03162277862429619\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-dialback\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-dial-red\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.20863580703735352,\n\t\t\t\t\t0.015996286645531654,\n\t\t\t\t\t0.010960102081298828,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-metal-ring\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.7990977764129639,\n\t\t\t\t\t0.7835385203361511,\n\t\t\t\t\t0.7230554819107056,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.43986254930496216\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_ior\":{\n\t\t\t\t\t\"ior\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-glass\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseCo
lorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Handle-chrome\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.03162277862429619\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-inside\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-knob\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.44519850611686707,\n\t\t\t\t\t0.3762626051902771,\n\t\t\t\t\t0.16202951967716217,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.03162277862429619\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-suround\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.3004956841468811,\n\t\t\t\t\t0.11759431660175323,\n\t\t\t\t\t0.05351651459932327,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.9106529355049133,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"dou
bleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-edges\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.029999999329447746\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Radio-handle\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.48514682054519653,\n\t\t\t\t\t0.46778425574302673,\n\t\t\t\t\t0.3662528097629547,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.19415807723999023\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"picframe-picc.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorTexture\":{\n\t\t\t\t\t\"index\":14\n\t\t\t\t},\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Steel\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.22360679507255554\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Black-Raught-Iron\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.8
00000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.10000000149011612\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Marble-harf\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.07680906355381012,\n\t\t\t\t\t0.07680906355381012,\n\t\t\t\t\t0.07680906355381012,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0.8556700944900513,\n\t\t\t\t\"roughnessFactor\":0.20790377259254456\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":500\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t0.25,\n\t\t\t\t1,\n\t\t\t\t0.338214635848999\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":3\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ShapesLight\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.9559678435325623,\n\t\t\t\t\t0.6307579278945923,\n\t\t\t\t\t0.3049875497817993,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.6780573129653931,\n\t\t\t\t0.25\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":5\n\t\t
\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ShapesLight.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.9559678435325623,\n\t\t\t\t\t0.6307579278945923,\n\t\t\t\t\t0.3049875497817993,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"RedSphere\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.8003232479095459,\n\t\t\t\t\t0.009998607449233532,\n\t\t\t\t\t0.03754507377743721,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t0.8772789239883423,\n\t\t\t\t0.25,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":7\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ShapesLight.003\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.9559678435325623,\n\t\t\t\t\t0.6307579278945923,\n\t\t\t\t\t0.3049875497817993,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t0.25,\n\t\t\t\t0.3961591124534607,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":7\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"ShapesLight.002\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.9559678435325623,\n\t\t\t\t\t0.6307579278945923,\n\t\t\t\t\t0.3049875497817993,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"roughnessFactor\":0.20000000298023224\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_transmission\":{\n\t\t\t\t\t\"transmissionFactor\":1\n\t\t\t\t}\n\t\t\t},\n\
t\t\t\"name\":\"GreenSphere\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.006491726264357567,\n\t\t\t\t\t0.8006302714347839,\n\t\t\t\t\t0,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":5\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"LargerDonut\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_clearcoat\":{\n\t\t\t\t\t\"clearcoatRoughnessFactor\":0\n\t\t\t\t},\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":10\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"SmallerDonut\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.7658302783966064,\n\t\t\t\t0.2915126085281372\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 
3\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.5459583401679993,\n\t\t\t\t0.1512901932001114\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":30\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 1\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.18275855481624603,\n\t\t\t\t0.039912912994623184\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 2.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.18275855481624603,\n\t\t\t\t0.039912912994623184\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 
2.002\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.18275855481624603,\n\t\t\t\t0.039912912994623184\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 2.003\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.5459583401679993,\n\t\t\t\t0.1512901932001114\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":30\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 1.002\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.5459583401679993,\n\t\t\t\t0.1512901932001114\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":30\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 
1.003\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.7658302783966064,\n\t\t\t\t0.2915126085281372\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":30\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 3.001\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.18275855481624603,\n\t\t\t\t0.039912912994623184\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 2.004\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.18275855481624603,\n\t\t\t\t0.039912912994623184\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Poorman's Wood Fire 
2.005\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"doubleSided\":true,\n\t\t\t\"emissiveFactor\":[\n\t\t\t\t1,\n\t\t\t\t0.75804549008447,\n\t\t\t\t0.4183522278860195\n\t\t\t],\n\t\t\t\"extensions\":{\n\t\t\t\t\"KHR_materials_emissive_strength\":{\n\t\t\t\t\t\"emissiveStrength\":2920.3181862831116\n\t\t\t\t}\n\t\t\t},\n\t\t\t\"name\":\"Material.002\",\n\t\t\t\"pbrMetallicRoughness\":{\n\t\t\t\t\"baseColorFactor\":[\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t0.800000011920929,\n\t\t\t\t\t1\n\t\t\t\t],\n\t\t\t\t\"metallicFactor\":0,\n\t\t\t\t\"roughnessFactor\":0.5\n\t\t\t}\n\t\t}\n\t],\n\t\"meshes\":[\n\t\t{\n\t\t\t\"name\":\"Plane.011\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":0,\n\t\t\t\t\t\t\"NORMAL\":1\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":2,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.010\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":3,\n\t\t\t\t\t\t\"NORMAL\":4,\n\t\t\t\t\t\t\"TEXCOORD_0\":5\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":6,\n\t\t\t\t\t\"material\":1\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.006\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":7,\n\t\t\t\t\t\t\"NORMAL\":8,\n\t\t\t\t\t\t\"TEXCOORD_0\":9\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":10,\n\t\t\t\t\t\"material\":2\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":11,\n\t\t\t\t\t\t\"NORMAL\":12\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":13,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.009\",\n\t\t\t\"primitives\":[\n\
t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":14,\n\t\t\t\t\t\t\"NORMAL\":15\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":16,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.007\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":17,\n\t\t\t\t\t\t\"NORMAL\":18\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":19,\n\t\t\t\t\t\"material\":1\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.004\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":20,\n\t\t\t\t\t\t\"NORMAL\":21\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":22,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":23,\n\t\t\t\t\t\t\"NORMAL\":24\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":25,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.053\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":26,\n\t\t\t\t\t\t\"NORMAL\":27\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":28,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.062\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":29,\n\t\t\t\t\t\t\"NORMAL\":30\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":31,\n\t\t\t\t\t\"material\":3\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.061\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":32,\n\t\t\t\t\t\t\"NORMAL\":33\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":31,\n\t\t\t\t\t\"material\":4\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":34,\n\t\t\t\t\t\t\"NORMAL\":35\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":36,\n\t\t\t\t\t\"material\":5\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.001\",\n\t\t\t\
"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":37,\n\t\t\t\t\t\t\"NORMAL\":38\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":39,\n\t\t\t\t\t\"material\":6\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":40,\n\t\t\t\t\t\t\"NORMAL\":41\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":42,\n\t\t\t\t\t\"material\":6\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":43,\n\t\t\t\t\t\t\"NORMAL\":44\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":45,\n\t\t\t\t\t\"material\":6\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":46,\n\t\t\t\t\t\t\"NORMAL\":47,\n\t\t\t\t\t\t\"TEXCOORD_0\":48\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":49,\n\t\t\t\t\t\"material\":7\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.004\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":50,\n\t\t\t\t\t\t\"NORMAL\":51,\n\t\t\t\t\t\t\"TEXCOORD_0\":52\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":53,\n\t\t\t\t\t\"material\":8\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.009\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":54,\n\t\t\t\t\t\t\"NORMAL\":55\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":56,\n\t\t\t\t\t\"material\":9\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.043\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":57,\n\t\t\t\t\t\t\"NORMAL\":58\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":59,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.134\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":60,\n\t\t\t\t\t\t\"NORMAL\":61\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":62,\n\t\t\t\t\t\"mater
ial\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.133\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":63,\n\t\t\t\t\t\t\"NORMAL\":64\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":62,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.132\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":65,\n\t\t\t\t\t\t\"NORMAL\":66\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":67,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.131\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":68,\n\t\t\t\t\t\t\"NORMAL\":69\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":62,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.130\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":70,\n\t\t\t\t\t\t\"NORMAL\":71\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":72,\n\t\t\t\t\t\"material\":10\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.129\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":73,\n\t\t\t\t\t\t\"NORMAL\":74\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":75,\n\t\t\t\t\t\"material\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.128\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":76,\n\t\t\t\t\t\t\"NORMAL\":77\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":78,\n\t\t\t\t\t\"material\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.126\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":79,\n\t\t\t\t\t\t\"NORMAL\":80\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":81,\n\t\t\t\t\t\"material\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.125\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":82,\n\t\t\t\t\t\t\"NORMAL\":83\n\t\t\t\t\t},\n\t\t\t\t\t\"
indices\":84,\n\t\t\t\t\t\"material\":11\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.123\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":85,\n\t\t\t\t\t\t\"NORMAL\":86,\n\t\t\t\t\t\t\"TEXCOORD_0\":87\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":12\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.122\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":89,\n\t\t\t\t\t\t\"NORMAL\":90\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":91,\n\t\t\t\t\t\"material\":13\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.121\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":92,\n\t\t\t\t\t\t\"NORMAL\":93\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":91,\n\t\t\t\t\t\"material\":13\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.120\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":94,\n\t\t\t\t\t\t\"NORMAL\":95,\n\t\t\t\t\t\t\"TEXCOORD_0\":96\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":14\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.114\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":97,\n\t\t\t\t\t\t\"NORMAL\":98,\n\t\t\t\t\t\t\"TEXCOORD_0\":99\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":15\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.113\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":100,\n\t\t\t\t\t\t\"NORMAL\":101\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":91,\n\t\t\t\t\t\"material\":13\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.010\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":102,\n\t\t\t\t\t\t\"NORMAL\":103,\n\t\t\t\t\t\t\"TEXCOORD_0\":104\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":105,\n\t\t\t\t\t\"material\":16\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name
\":\"Mesh.011\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":106,\n\t\t\t\t\t\t\"NORMAL\":107\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":108,\n\t\t\t\t\t\"material\":17\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.039\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":109,\n\t\t\t\t\t\t\"NORMAL\":110\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":111,\n\t\t\t\t\t\"material\":17\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.038\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":112,\n\t\t\t\t\t\t\"NORMAL\":113\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":114,\n\t\t\t\t\t\"material\":17\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.034\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":115,\n\t\t\t\t\t\t\"NORMAL\":116\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":117,\n\t\t\t\t\t\"material\":18\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.012\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":118,\n\t\t\t\t\t\t\"NORMAL\":119,\n\t\t\t\t\t\t\"TEXCOORD_0\":120\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":121,\n\t\t\t\t\t\"material\":19\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.110\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":122,\n\t\t\t\t\t\t\"NORMAL\":123\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":91,\n\t\t\t\t\t\"material\":13\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.106\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":124,\n\t\t\t\t\t\t\"NORMAL\":125,\n\t\t\t\t\t\t\"TEXCOORD_0\":126\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.105\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":128,\n\t\t\t\t\t\t\"NORMAL\"
:129,\n\t\t\t\t\t\t\"TEXCOORD_0\":130\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.104\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":131,\n\t\t\t\t\t\t\"NORMAL\":132,\n\t\t\t\t\t\t\"TEXCOORD_0\":133\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.103\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":134,\n\t\t\t\t\t\t\"NORMAL\":135,\n\t\t\t\t\t\t\"TEXCOORD_0\":136\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.102\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":137,\n\t\t\t\t\t\t\"NORMAL\":138,\n\t\t\t\t\t\t\"TEXCOORD_0\":139\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.101\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":140,\n\t\t\t\t\t\t\"NORMAL\":141,\n\t\t\t\t\t\t\"TEXCOORD_0\":142\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.100\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":143,\n\t\t\t\t\t\t\"NORMAL\":144,\n\t\t\t\t\t\t\"TEXCOORD_0\":145\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.099\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":146,\n\t\t\t\t\t\t\"NORMAL\":147,\n\t\t\t\t\t\t\"TEXCOORD_0\":148\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.098\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\
":149,\n\t\t\t\t\t\t\"NORMAL\":150,\n\t\t\t\t\t\t\"TEXCOORD_0\":151\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.097\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":152,\n\t\t\t\t\t\t\"NORMAL\":153,\n\t\t\t\t\t\t\"TEXCOORD_0\":154\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.096\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":155,\n\t\t\t\t\t\t\"NORMAL\":156,\n\t\t\t\t\t\t\"TEXCOORD_0\":157\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.095\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":158,\n\t\t\t\t\t\t\"NORMAL\":159,\n\t\t\t\t\t\t\"TEXCOORD_0\":160\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.094\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":161,\n\t\t\t\t\t\t\"NORMAL\":162,\n\t\t\t\t\t\t\"TEXCOORD_0\":163\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.093\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":164,\n\t\t\t\t\t\t\"NORMAL\":165,\n\t\t\t\t\t\t\"TEXCOORD_0\":166\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.092\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":167,\n\t\t\t\t\t\t\"NORMAL\":168,\n\t\t\t\t\t\t\"TEXCOORD_0\":169\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.091\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attribute
s\":{\n\t\t\t\t\t\t\"POSITION\":170,\n\t\t\t\t\t\t\"NORMAL\":171,\n\t\t\t\t\t\t\"TEXCOORD_0\":172\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.090\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":173,\n\t\t\t\t\t\t\"NORMAL\":174,\n\t\t\t\t\t\t\"TEXCOORD_0\":175\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.089\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":176,\n\t\t\t\t\t\t\"NORMAL\":177,\n\t\t\t\t\t\t\"TEXCOORD_0\":178\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":127,\n\t\t\t\t\t\"material\":20\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.088\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":179,\n\t\t\t\t\t\t\"NORMAL\":180,\n\t\t\t\t\t\t\"TEXCOORD_0\":181\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":182,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.086\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":183,\n\t\t\t\t\t\t\"NORMAL\":184,\n\t\t\t\t\t\t\"TEXCOORD_0\":185\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":186,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.085\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":187,\n\t\t\t\t\t\t\"NORMAL\":188,\n\t\t\t\t\t\t\"TEXCOORD_0\":189\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":182,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.083\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":190,\n\t\t\t\t\t\t\"NORMAL\":191\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":192,\n\t\t\t\t\t\"material\":22\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.082\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\"
:{\n\t\t\t\t\t\t\"POSITION\":193,\n\t\t\t\t\t\t\"NORMAL\":194,\n\t\t\t\t\t\t\"TEXCOORD_0\":195\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":196,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.080\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":197,\n\t\t\t\t\t\t\"NORMAL\":198,\n\t\t\t\t\t\t\"TEXCOORD_0\":199\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":200,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.079\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":201,\n\t\t\t\t\t\t\"NORMAL\":202\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":203,\n\t\t\t\t\t\"material\":23\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":204,\n\t\t\t\t\t\t\"NORMAL\":205\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":31,\n\t\t\t\t\t\"material\":24\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.078\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":206,\n\t\t\t\t\t\t\"NORMAL\":207\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":208\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.013\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":209,\n\t\t\t\t\t\t\"NORMAL\":210,\n\t\t\t\t\t\t\"TEXCOORD_0\":211\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":212,\n\t\t\t\t\t\"material\":25\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.014\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":213,\n\t\t\t\t\t\t\"NORMAL\":214\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":215,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.023\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":216,\n\t\t\t\t\t\t\"NORMAL\":217\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":218,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.008\",\n\t\t\t\"primi
tives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":219,\n\t\t\t\t\t\t\"NORMAL\":220\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":221,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.015\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":222,\n\t\t\t\t\t\t\"NORMAL\":223,\n\t\t\t\t\t\t\"TEXCOORD_0\":224\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":225,\n\t\t\t\t\t\"material\":27\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.016\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":226,\n\t\t\t\t\t\t\"NORMAL\":227,\n\t\t\t\t\t\t\"TEXCOORD_0\":228\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":229,\n\t\t\t\t\t\"material\":28\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.077\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":230,\n\t\t\t\t\t\t\"NORMAL\":231\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.076\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":232,\n\t\t\t\t\t\t\"NORMAL\":233\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":234\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.074\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":235,\n\t\t\t\t\t\t\"NORMAL\":236\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":237,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.072\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":238,\n\t\t\t\t\t\t\"NORMAL\":239\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":240,\n\t\t\t\t\t\"material\":22\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.071\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":241,\n\t\t\t\t\t\t\"NORMAL\":242,\n\t\t\t\t\t\t\"TEXCOORD_0\":243\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":244,\n\t\t\t\t\t\
"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.017\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":245,\n\t\t\t\t\t\t\"NORMAL\":246\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":247,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.022\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":248,\n\t\t\t\t\t\t\"NORMAL\":249\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":250,\n\t\t\t\t\t\"material\":29\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.069\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":251,\n\t\t\t\t\t\t\"NORMAL\":252\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":253,\n\t\t\t\t\t\"material\":30\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.068\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":254,\n\t\t\t\t\t\t\"NORMAL\":255,\n\t\t\t\t\t\t\"TEXCOORD_0\":256\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":31\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.048\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":257,\n\t\t\t\t\t\t\"NORMAL\":258\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":32\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.047\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":259,\n\t\t\t\t\t\t\"NORMAL\":260,\n\t\t\t\t\t\t\"TEXCOORD_0\":261\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":33\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.042\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":262,\n\t\t\t\t\t\t\"NORMAL\":263\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":264,\n\t\t\t\t\t\"material\":30\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.018\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\
":{\n\t\t\t\t\t\t\"POSITION\":265,\n\t\t\t\t\t\t\"NORMAL\":266\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":267,\n\t\t\t\t\t\"material\":34\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.028\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":268,\n\t\t\t\t\t\t\"NORMAL\":269\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":270,\n\t\t\t\t\t\"material\":35\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.046\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":271,\n\t\t\t\t\t\t\"NORMAL\":272\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":273,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.045\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":274,\n\t\t\t\t\t\t\"NORMAL\":275\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":276,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.019\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":277,\n\t\t\t\t\t\t\"NORMAL\":278\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":279,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.020\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":280,\n\t\t\t\t\t\t\"NORMAL\":281\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":282,\n\t\t\t\t\t\"material\":37\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.021\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":283,\n\t\t\t\t\t\t\"NORMAL\":284\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":285,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.022\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":286,\n\t\t\t\t\t\t\"NORMAL\":287\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":288,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.023\",\n\t\t\t
\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":289,\n\t\t\t\t\t\t\"NORMAL\":290\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":288,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.024\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":291,\n\t\t\t\t\t\t\"NORMAL\":292\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":293,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.025\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":294,\n\t\t\t\t\t\t\"NORMAL\":295\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":282,\n\t\t\t\t\t\"material\":37\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.026\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":296,\n\t\t\t\t\t\t\"NORMAL\":297\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":279,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.044\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":298,\n\t\t\t\t\t\t\"NORMAL\":299\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":276,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.043\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":300,\n\t\t\t\t\t\t\"NORMAL\":301\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":273,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.019\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":302,\n\t\t\t\t\t\t\"NORMAL\":303\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":270,\n\t\t\t\t\t\"material\":35\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.027\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":304,\n\t\t\t\t\t\t\"NORMAL\":305\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":267,\n\t\t\t\t\t\"material\":34\n\t\t\t\t}\n\t\t\t]\
n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.028\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":306,\n\t\t\t\t\t\t\"NORMAL\":307\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":267,\n\t\t\t\t\t\"material\":34\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Circle.010\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":308,\n\t\t\t\t\t\t\"NORMAL\":309\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":270,\n\t\t\t\t\t\"material\":35\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.040\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":310,\n\t\t\t\t\t\t\"NORMAL\":311\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":273,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.039\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":312,\n\t\t\t\t\t\t\"NORMAL\":313\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":276,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.029\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":314,\n\t\t\t\t\t\t\"NORMAL\":315\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":316,\n\t\t\t\t\t\"material\":36\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.030\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":317,\n\t\t\t\t\t\t\"NORMAL\":318\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":319,\n\t\t\t\t\t\"material\":37\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.031\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":320,\n\t\t\t\t\t\t\"NORMAL\":321\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":322,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.032\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":323,\n\t\t\t\t\t\t\"NORMAL\":324\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":
325,\n\t\t\t\t\t\"material\":38\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":326,\n\t\t\t\t\t\t\"NORMAL\":327\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":328,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.005\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":329,\n\t\t\t\t\t\t\"NORMAL\":330\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":331,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.033\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":332,\n\t\t\t\t\t\t\"NORMAL\":333\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":334,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.026\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":335,\n\t\t\t\t\t\t\"NORMAL\":336\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":337,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.027\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":338,\n\t\t\t\t\t\t\"NORMAL\":339\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":340,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.034\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":341,\n\t\t\t\t\t\t\"NORMAL\":342\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":343,\n\t\t\t\t\t\"material\":26\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.035\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":344,\n\t\t\t\t\t\t\"NORMAL\":345,\n\t\t\t\t\t\t\"TEXCOORD_0\":346\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":347,\n\t\t\t\t\t\"material\":16\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.036\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t
\t\"POSITION\":348,\n\t\t\t\t\t\t\"NORMAL\":349,\n\t\t\t\t\t\t\"TEXCOORD_0\":350\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":351,\n\t\t\t\t\t\"material\":39\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.033\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":352,\n\t\t\t\t\t\t\"NORMAL\":353,\n\t\t\t\t\t\t\"TEXCOORD_0\":354\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":182,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.034\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":355,\n\t\t\t\t\t\t\"NORMAL\":356\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":357,\n\t\t\t\t\t\"material\":22\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.037\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":358,\n\t\t\t\t\t\t\"NORMAL\":359,\n\t\t\t\t\t\t\"TEXCOORD_0\":360\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":361,\n\t\t\t\t\t\"material\":40\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.036\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":362,\n\t\t\t\t\t\t\"NORMAL\":363,\n\t\t\t\t\t\t\"TEXCOORD_0\":364\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":365,\n\t\t\t\t\t\"material\":21\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.038\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":366,\n\t\t\t\t\t\t\"NORMAL\":367\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":368,\n\t\t\t\t\t\"material\":41\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.039\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":369,\n\t\t\t\t\t\t\"NORMAL\":370\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":368,\n\t\t\t\t\t\"material\":41\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.040\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":371,\n\t\t\t\t\t\t\"NORMAL\":372\n\t\t\t\t\t},\n\t\t\t\t\t
\"indices\":373,\n\t\t\t\t\t\"material\":42\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":374,\n\t\t\t\t\t\t\"NORMAL\":375\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":376,\n\t\t\t\t\t\"material\":43\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.041\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":377,\n\t\t\t\t\t\t\"NORMAL\":378\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":379,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.042\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":380,\n\t\t\t\t\t\t\"NORMAL\":381\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":382\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.043\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":383,\n\t\t\t\t\t\t\"NORMAL\":384\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":385,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.044\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":386,\n\t\t\t\t\t\t\"NORMAL\":387\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":388\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.055\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":389,\n\t\t\t\t\t\t\"NORMAL\":390\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":391,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.045\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":392,\n\t\t\t\t\t\t\"NORMAL\":393\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":394,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.046\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":395,\n\t\t\t\t\t\t\"NORMAL\":396\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":397,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t
\t\"name\":\"Mesh.047\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":398,\n\t\t\t\t\t\t\"NORMAL\":399\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":400,\n\t\t\t\t\t\"material\":44\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.048\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":401,\n\t\t\t\t\t\t\"NORMAL\":402\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":403,\n\t\t\t\t\t\"material\":45\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.049\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":404,\n\t\t\t\t\t\t\"NORMAL\":405\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":406,\n\t\t\t\t\t\"material\":45\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.050\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":407,\n\t\t\t\t\t\t\"NORMAL\":408\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":409,\n\t\t\t\t\t\"material\":46\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.051\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":410,\n\t\t\t\t\t\t\"NORMAL\":411\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":412,\n\t\t\t\t\t\"material\":47\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.052\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":413,\n\t\t\t\t\t\t\"NORMAL\":414\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":415,\n\t\t\t\t\t\"material\":47\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.054\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":416,\n\t\t\t\t\t\t\"NORMAL\":417\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":418,\n\t\t\t\t\t\"material\":48\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Curve.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":419,\n\t\t\t\t\t\t\"NORMAL\":420,\n\t\t\t\t\t\t\"TEXCOORD_0\":421\n\t\t\t\t\t},\n\t\t\t\t\t\"
indices\":422,\n\t\t\t\t\t\"material\":49\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.055\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":423,\n\t\t\t\t\t\t\"NORMAL\":424,\n\t\t\t\t\t\t\"TEXCOORD_0\":425\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":426,\n\t\t\t\t\t\"material\":50\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":427,\n\t\t\t\t\t\t\"NORMAL\":428,\n\t\t\t\t\t\t\"TEXCOORD_0\":429\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":430,\n\t\t\t\t\t\"material\":51\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.056\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":431,\n\t\t\t\t\t\t\"NORMAL\":432\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":433,\n\t\t\t\t\t\"material\":52\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.057\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":434,\n\t\t\t\t\t\t\"NORMAL\":435\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":436,\n\t\t\t\t\t\"material\":53\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.058\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":437,\n\t\t\t\t\t\t\"NORMAL\":438\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":439,\n\t\t\t\t\t\"material\":48\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.059\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":440,\n\t\t\t\t\t\t\"NORMAL\":441\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":442,\n\t\t\t\t\t\"material\":48\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.060\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":443,\n\t\t\t\t\t\t\"NORMAL\":444\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":445,\n\t\t\t\t\t\"material\":54\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.058\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":446,\n\t\t\t\t\t\t\"NO
RMAL\":447\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":448,\n\t\t\t\t\t\"material\":55\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.062\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":449,\n\t\t\t\t\t\t\"NORMAL\":450\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":451,\n\t\t\t\t\t\"material\":56\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.063\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":452,\n\t\t\t\t\t\t\"NORMAL\":453\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":454,\n\t\t\t\t\t\"material\":57\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.064\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":455,\n\t\t\t\t\t\t\"NORMAL\":456\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":457,\n\t\t\t\t\t\"material\":58\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.065\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":458,\n\t\t\t\t\t\t\"NORMAL\":459\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":460,\n\t\t\t\t\t\"material\":58\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.053\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":461,\n\t\t\t\t\t\t\"NORMAL\":462\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":463,\n\t\t\t\t\t\"material\":59\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.063\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":464,\n\t\t\t\t\t\t\"NORMAL\":465,\n\t\t\t\t\t\t\"TEXCOORD_0\":466\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":88,\n\t\t\t\t\t\"material\":60\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.066\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":467,\n\t\t\t\t\t\t\"NORMAL\":468\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":469,\n\t\t\t\t\t\"material\":61\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.067\",\n\t\t\t\"primitives\":[\n\t\t\
t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":470,\n\t\t\t\t\t\t\"NORMAL\":471\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":472,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.068\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":473,\n\t\t\t\t\t\t\"NORMAL\":474\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":475,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.069\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":476,\n\t\t\t\t\t\t\"NORMAL\":477\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":478,\n\t\t\t\t\t\"material\":61\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.070\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":479,\n\t\t\t\t\t\t\"NORMAL\":480\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":481,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.071\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":482,\n\t\t\t\t\t\t\"NORMAL\":483\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":484,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.017\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":485,\n\t\t\t\t\t\t\"NORMAL\":486\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":487,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.072\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":488,\n\t\t\t\t\t\t\"NORMAL\":489\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":490,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Mesh.073\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":491,\n\t\t\t\t\t\t\"NORMAL\":492\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":493,\n\t\t\t\t\t\"material\":62\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"na
me\":\"Plane.016\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":494,\n\t\t\t\t\t\t\"NORMAL\":495\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":496,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.015\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":497,\n\t\t\t\t\t\t\"NORMAL\":498\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":499,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.014\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":500,\n\t\t\t\t\t\t\"NORMAL\":501\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":502,\n\t\t\t\t\t\"material\":63\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.013\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":503,\n\t\t\t\t\t\t\"NORMAL\":504\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":505,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Plane.018\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":506,\n\t\t\t\t\t\t\"NORMAL\":507\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":508,\n\t\t\t\t\t\"material\":0\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":509,\n\t\t\t\t\t\t\"NORMAL\":510,\n\t\t\t\t\t\t\"TEXCOORD_0\":511\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":512,\n\t\t\t\t\t\"material\":64\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Icosphere.009\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":513,\n\t\t\t\t\t\t\"NORMAL\":514,\n\t\t\t\t\t\t\"TEXCOORD_0\":515\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":516,\n\t\t\t\t\t\"material\":65\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Icosphere.008\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":517,\n\t\t\t\t\t\t\"NORMA
L\":518,\n\t\t\t\t\t\t\"TEXCOORD_0\":519\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":520,\n\t\t\t\t\t\"material\":66\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":521,\n\t\t\t\t\t\t\"NORMAL\":522,\n\t\t\t\t\t\t\"TEXCOORD_0\":523\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":524,\n\t\t\t\t\t\"material\":67\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Icosphere.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":525,\n\t\t\t\t\t\t\"NORMAL\":526,\n\t\t\t\t\t\t\"TEXCOORD_0\":527\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":520,\n\t\t\t\t\t\"material\":68\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Icosphere.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":528,\n\t\t\t\t\t\t\"NORMAL\":529,\n\t\t\t\t\t\t\"TEXCOORD_0\":530\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":520,\n\t\t\t\t\t\"material\":69\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":531,\n\t\t\t\t\t\t\"NORMAL\":532,\n\t\t\t\t\t\t\"TEXCOORD_0\":533\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":524,\n\t\t\t\t\t\"material\":70\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Torus\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":534,\n\t\t\t\t\t\t\"NORMAL\":535,\n\t\t\t\t\t\t\"TEXCOORD_0\":536\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":537,\n\t\t\t\t\t\"material\":71\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Torus.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":538,\n\t\t\t\t\t\t\"NORMAL\":539,\n\t\t\t\t\t\t\"TEXCOORD_0\":540\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":541,\n\t\t\t\t\t\"material\":72\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSIT
ION\":542,\n\t\t\t\t\t\t\"NORMAL\":543,\n\t\t\t\t\t\t\"TEXCOORD_0\":544\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":73\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.001\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":546,\n\t\t\t\t\t\t\"NORMAL\":547,\n\t\t\t\t\t\t\"TEXCOORD_0\":548\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":74\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.002\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":549,\n\t\t\t\t\t\t\"NORMAL\":550,\n\t\t\t\t\t\t\"TEXCOORD_0\":551\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":75\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":552,\n\t\t\t\t\t\t\"NORMAL\":553,\n\t\t\t\t\t\t\"TEXCOORD_0\":554\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":76\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.004\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":555,\n\t\t\t\t\t\t\"NORMAL\":556,\n\t\t\t\t\t\t\"TEXCOORD_0\":557\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":77\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.005\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":558,\n\t\t\t\t\t\t\"NORMAL\":559,\n\t\t\t\t\t\t\"TEXCOORD_0\":560\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":78\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.006\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":561,\n\t\t\t\t\t\t\"NORMAL\":562,\n\t\t\t\t\t\t\"TEXCOORD_0\":563\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":79\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.007\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\"
:{\n\t\t\t\t\t\t\"POSITION\":564,\n\t\t\t\t\t\t\"NORMAL\":565,\n\t\t\t\t\t\t\"TEXCOORD_0\":566\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":80\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.008\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":567,\n\t\t\t\t\t\t\"NORMAL\":568,\n\t\t\t\t\t\t\"TEXCOORD_0\":569\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":81\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Cube.009\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":570,\n\t\t\t\t\t\t\"NORMAL\":571,\n\t\t\t\t\t\t\"TEXCOORD_0\":572\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":545,\n\t\t\t\t\t\"material\":82\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t\t{\n\t\t\t\"name\":\"Sphere.003\",\n\t\t\t\"primitives\":[\n\t\t\t\t{\n\t\t\t\t\t\"attributes\":{\n\t\t\t\t\t\t\"POSITION\":573,\n\t\t\t\t\t\t\"NORMAL\":574,\n\t\t\t\t\t\t\"TEXCOORD_0\":575\n\t\t\t\t\t},\n\t\t\t\t\t\"indices\":512,\n\t\t\t\t\t\"material\":83\n\t\t\t\t}\n\t\t\t]\n\t\t}\n\t],\n\t\"textures\":[\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":0\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":1\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":2\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":3\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":4\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":5\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":6\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":7\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":8\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":9\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":10\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":11\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":12\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":13\n\t\t},\n\t\t{\n\t\t\t\"sampler\":0,\n\t\t\t\"source\":14\n\t\t}\n\t],\n\t\"images\":[\n\t\t{\n\t\t\t\"mimeType\":\"image/
jpeg\",\n\t\t\t\"name\":\"WoodFloor051_2K-JPG_Color_reduced\",\n\t\t\t\"uri\":\"the-white-room/WoodFloor051_2K-JPG_Color_reduced.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"magazine\",\n\t\t\t\"uri\":\"the-white-room/magazine.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"photo3\",\n\t\t\t\"uri\":\"the-white-room/photo3.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"photo4\",\n\t\t\t\"uri\":\"the-white-room/photo4.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"photo1\",\n\t\t\t\"uri\":\"the-white-room/photo1.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"cushion-stripe-purple\",\n\t\t\t\"uri\":\"the-white-room/cushion-stripe-purple.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"Shade-paper\",\n\t\t\t\"uri\":\"the-white-room/Shade-paper.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"book-spines\",\n\t\t\t\"uri\":\"the-white-room/book-spines.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"cushion-purple-yellow-stripe\",\n\t\t\t\"uri\":\"the-white-room/cushion-purple-yellow-stripe.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"cushion-green-circles\",\n\t\t\t\"uri\":\"the-white-room/cushion-green-circles.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"shade-stripes\",\n\t\t\t\"uri\":\"the-white-room/shade-stripes.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"pic5wide\",\n\t\t\t\"uri\":\"the-white-room/pic5wide.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"pic3\",\n\t\t\t\"uri\":\"the-white-room/pic3.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"carpet-text3b\",\n\t\t\t\"uri\":\"the-white-room/carpet-text3b.jpg\"\n\t\t},\n\t\t{\n\t\t\t\"mimeType\":\"image/jpeg\",\n\t\t\t\"name\":\"photo2\",\n\t\t\t\"uri\":\"t
he-white-room/photo2.jpg\"\n\t\t}\n\t],\n\t\"accessors\":[\n\t\t{\n\t\t\t\"bufferView\":0,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":648,\n\t\t\t\"max\":[\n\t\t\t\t4.922062397003174,\n\t\t\t\t4.82349157333374,\n\t\t\t\t0.010916324332356453\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5326654314994812,\n\t\t\t\t-2.162641763687134,\n\t\t\t\t-0.04265495017170906\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":1,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":648,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":2,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":936,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":3,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t3.5748255252838135,\n\t\t\t\t0,\n\t\t\t\t4.858819484710693\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-2.0094664096832275,\n\t\t\t\t0,\n\t\t\t\t-2.792449951171875\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":4,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":5,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":6,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":198,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":7,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t3.5019261837005615,\n\t\t\t\t0,\n\t\t\t\t4.7957072257995605\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.968488335609436,\n\t\t\t\t0,\n\t\t\t\t-2.7355048656463623\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":8,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":9,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":10,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":198,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":
11,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":950,\n\t\t\t\"max\":[\n\t\t\t\t5.455154895782471,\n\t\t\t\t3.2512876987457275,\n\t\t\t\t-0.00025314855156466365\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.00048618417349644005,\n\t\t\t\t-2.7012579441070557,\n\t\t\t\t-0.16332300007343292\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":12,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":950,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":13,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1716,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":14,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2384,\n\t\t\t\"max\":[\n\t\t\t\t5.891605377197266,\n\t\t\t\t0.08312232792377472,\n\t\t\t\t0.08869484812021255\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.6215317249298096,\n\t\t\t\t-5.371967315673828,\n\t\t\t\t-0.08274871110916138\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":15,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2384,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":16,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3780,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":17,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1864,\n\t\t\t\"max\":[\n\t\t\t\t2.9372189044952393,\n\t\t\t\t3.1419870853424072,\n\t\t\t\t5.128809928894043\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-2.7930970191955566,\n\t\t\t\t-1.24563757708529e-06,\n\t\t\t\t-2.6410415172576904\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":18,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1864,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":19,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3774,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":20,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12640,\n\t\t\t\"max\":[\n\t\t\t\t0.5476992130279541,\n\t\t\t\t0.17167966067790985,\n\t\t\t\t0.6571645736694336\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5477049350738525,
\n\t\t\t\t-0.054475005716085434,\n\t\t\t\t-2.3420510292053223\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":21,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12640,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":22,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":18660,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":23,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12645,\n\t\t\t\"max\":[\n\t\t\t\t0.5476990938186646,\n\t\t\t\t0.17167964577674866,\n\t\t\t\t0.6571645736694336\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5477047562599182,\n\t\t\t\t-0.05447499826550484,\n\t\t\t\t-2.3420510292053223\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":24,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12645,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":25,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":18660,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":26,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12662,\n\t\t\t\"max\":[\n\t\t\t\t0.696713387966156,\n\t\t\t\t0.17167964577674866,\n\t\t\t\t0.6571645736694336\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6954436302185059,\n\t\t\t\t-0.054475005716085434,\n\t\t\t\t-2.3420510292053223\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":27,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12662,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":28,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":18660,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":29,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.9999999403953552,\n\t\t\t\t0,\n\t\t\t\t0.8618231415748596\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t-2.2511860819918184e-08,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":30,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\
t\t\t\"bufferView\":31,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":32,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.9999999403953552,\n\t\t\t\t0,\n\t\t\t\t0.8618231415748596\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t-2.2511860819918184e-08,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":33,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":34,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":374,\n\t\t\t\"max\":[\n\t\t\t\t1.1131902933120728,\n\t\t\t\t7.659307479858398,\n\t\t\t\t1.1132620573043823\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.1132620573043823,\n\t\t\t\t-0.0019058843608945608,\n\t\t\t\t-1.1132620573043823\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":35,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":374,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":36,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2148,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":37,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":554,\n\t\t\t\"max\":[\n\t\t\t\t0.6911384463310242,\n\t\t\t\t0.9967806935310364,\n\t\t\t\t0.6907230615615845\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6903073191642761,\n\t\t\t\t0.0020614692475646734,\n\t\t\t\t-0.6907227635383606\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":38,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":554,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":39,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3312,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":40,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":602,\n\t\t\t\"max\":[\n\t\t\t\t0.6978312730789185,\n\t\t\t\t1.2345601320266724,\n\t\t\t\t0.6981743574142456\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6985169649124146,\n\t\t\t\t0.0
007714119856245816,\n\t\t\t\t-0.6981739401817322\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":41,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":602,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":42,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3600,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":43,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":650,\n\t\t\t\"max\":[\n\t\t\t\t0.9311015009880066,\n\t\t\t\t1.7556588649749756,\n\t\t\t\t0.9311015009880066\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9311015009880066,\n\t\t\t\t0.0005759156192652881,\n\t\t\t\t-0.9311015009880066\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":44,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":650,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":45,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3888,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":46,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":104,\n\t\t\t\"max\":[\n\t\t\t\t0.9999999403953552,\n\t\t\t\t0.0006058623548597097,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.003533124923706,\n\t\t\t\t-0.03656959533691406,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":47,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":104,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":48,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":104,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":49,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":228,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":50,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":459,\n\t\t\t\"max\":[\n\t\t\t\t0.2668321430683136,\n\t\t\t\t0.20129023492336273,\n\t\t\t\t0.26803481578826904\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2691414952278137,\n\t\t\t\t-0.20129023492336273,\n\t\t\t\t-0.2679380476474762\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\
"bufferView\":51,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":459,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":52,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":459,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":53,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2304,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":54,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2114,\n\t\t\t\"max\":[\n\t\t\t\t1.6868435144424438,\n\t\t\t\t0.5824713110923767,\n\t\t\t\t1.6868435144424438\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.6868435144424438,\n\t\t\t\t-0.048781149089336395,\n\t\t\t\t-1.6868433952331543\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":55,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2114,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":56,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":12672,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":57,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":18,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t8.977396965026855,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t0,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":58,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":18,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":59,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":60,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.3340943455696106,\n\t\t\t\t-0.04397451877593994,\n\t\t\t\t0.5399194955825806\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.33409440517425537,\n\t\t\t\t-0.154801607131958,\n\t\t\t\t-0.5399197340011597\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":61,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":62,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":132,\n\t\t\t\"t
ype\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":63,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.15047866106033325,\n\t\t\t\t0.02035975456237793,\n\t\t\t\t0.2590247690677643\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.33409440517425537,\n\t\t\t\t-0.15337657928466797,\n\t\t\t\t-0.5399197340011597\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":64,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":65,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.22951847314834595,\n\t\t\t\t0,\n\t\t\t\t0.2590247690677643\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.33409440517425537,\n\t\t\t\t-0.15473389625549316,\n\t\t\t\t-0.5399197340011597\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":66,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":67,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":132,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":68,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.15777474641799927,\n\t\t\t\t0.030539510771632195,\n\t\t\t\t0.27240052819252014\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.33409440517425537,\n\t\t\t\t-0.15337657928466797,\n\t\t\t\t-0.5399197340011597\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":69,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":70,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.3340943455696106,\n\t\t\t\t0,\n\t\t\t\t0.5399194955825806\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.33409440517425537,\n\t\t\t\t-0.154801607131958,\n\t\t\t\t-0.5399197340011597\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":71,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\
"bufferView\":72,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":132,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":73,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":790,\n\t\t\t\"max\":[\n\t\t\t\t0.1862427443265915,\n\t\t\t\t0,\n\t\t\t\t0.36614668369293213\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.19704250991344452,\n\t\t\t\t-0.07045507431030273,\n\t\t\t\t-0.36636513471603394\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":74,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":790,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":75,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2208,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":76,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":852,\n\t\t\t\"max\":[\n\t\t\t\t1.2040683031082153,\n\t\t\t\t0,\n\t\t\t\t0.9120947122573853\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.7216556072235107,\n\t\t\t\t-0.07064461708068848,\n\t\t\t\t0.2003590315580368\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":77,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":852,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":78,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2052,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":79,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":704,\n\t\t\t\"max\":[\n\t\t\t\t0.21129059791564941,\n\t\t\t\t0,\n\t\t\t\t0.36592167615890503\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21129059791564941,\n\t\t\t\t-0.07028055191040039,\n\t\t\t\t-0.36592167615890503\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":80,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":704,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":81,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2112,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":82,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":916,\n\t\t\t\"max\":[\n\t\t\t\t0.43959543108940125,\n\t\t\t\t0,\n\t\t\t\t0.92272520065307
62\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.18019215762615204,\n\t\t\t\t-0.0712590217590332,\n\t\t\t\t-0.07826060056686401\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":83,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":916,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":84,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2532,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":85,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.08039847761392593,\n\t\t\t\t-0.004371070768684149,\n\t\t\t\t0.10489880293607712\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08039848506450653,\n\t\t\t\t-0.0043710870668292046,\n\t\t\t\t-0.13081341981887817\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":86,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":87,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":88,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":89,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"max\":[\n\t\t\t\t0.0932014212012291,\n\t\t\t\t-5.687807060894556e-06,\n\t\t\t\t0.11835099011659622\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09320143610239029,\n\t\t\t\t-0.004655706696212292,\n\t\t\t\t-0.14590656757354736\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":90,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":91,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":92,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"max\":[\n\t\t\t\t0.0932014212012291,\n\t\t\t\t-5.687807060894556e-06,\n\t\t\t\t0.11835099011659622\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09320143610239029,\n\t\t\t\t-0.004655706696212292,\n\t\t
\t\t-0.14590656757354736\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":93,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":94,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.08039847761392593,\n\t\t\t\t-0.004371070768684149,\n\t\t\t\t0.10489880293607712\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08039848506450653,\n\t\t\t\t-0.0043710870668292046,\n\t\t\t\t-0.13081341981887817\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":95,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":96,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":97,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.08039847761392593,\n\t\t\t\t-0.004371070768684149,\n\t\t\t\t0.10489880293607712\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08039848506450653,\n\t\t\t\t-0.0043710870668292046,\n\t\t\t\t-0.13081341981887817\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":98,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":99,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":100,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"max\":[\n\t\t\t\t0.0932014212012291,\n\t\t\t\t-5.687807060894556e-06,\n\t\t\t\t0.11835099011659622\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09320143610239029,\n\t\t\t\t-0.004655706696212292,\n\t\t\t\t-0.14590656757354736\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":101,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":102,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":639,\n\t\t\t\"max\":[\n\t\t\t\t0.21286319196224213,\n\t\t\t\t0.0554355829
95414734,\n\t\t\t\t0.21552662551403046\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21678972244262695,\n\t\t\t\t-0.060694679617881775,\n\t\t\t\t-0.20884688198566437\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":103,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":639,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":104,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":639,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":105,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2517,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":106,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":64,\n\t\t\t\"max\":[\n\t\t\t\t0.07242931425571442,\n\t\t\t\t-0.0021060851868242025,\n\t\t\t\t0.07242931425571442\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.07242932170629501,\n\t\t\t\t-0.046004775911569595,\n\t\t\t\t-0.07242931425571442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":107,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":64,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":108,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":306,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":109,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":72,\n\t\t\t\"max\":[\n\t\t\t\t0.027787016704678535,\n\t\t\t\t0.016154149547219276,\n\t\t\t\t0.027787016704678535\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.027787016704678535,\n\t\t\t\t-0.07282548397779465,\n\t\t\t\t-0.027787016704678535\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":110,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":72,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":111,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":112,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"max\":[\n\t\t\t\t0.0025792610831558704,\n\t\t\t\t0.4033385217189789,\n\t\t\t\t0.0029782739002257586\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.00
25792610831558704,\n\t\t\t\t0,\n\t\t\t\t-0.0029782739002257586\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":113,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":114,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":115,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"max\":[\n\t\t\t\t0.08703261613845825,\n\t\t\t\t0.003850689623504877,\n\t\t\t\t0.015090188942849636\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08703261613845825,\n\t\t\t\t-0.5693657398223877,\n\t\t\t\t-0.002090851776301861\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":116,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":117,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":118,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1876,\n\t\t\t\"max\":[\n\t\t\t\t0.2814808189868927,\n\t\t\t\t0.2795661389827728,\n\t\t\t\t0.28042706847190857\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.28073644638061523,\n\t\t\t\t-0.2794150114059448,\n\t\t\t\t-0.27962353825569153\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":119,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1876,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":120,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1876,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":121,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":8064,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":122,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"max\":[\n\t\t\t\t0.0932014212012291,\n\t\t\t\t-5.687807060894556e-06,\n\t\t\t\t0.11835099011659622\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09320143610239029,\n\t\t\t\t-0.004655706696212292,\n\t\t\t\t-0.14590656757354736\n\t\t\t],\n\t\t\t\"ty
pe\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":123,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":124,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.8540287017822266,\n\t\t\t\t0.480212926864624\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5835559368133545,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":125,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":126,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":127,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":84,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":128,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.0786960124969482,\n\t\t\t\t0.40594857931137085\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.5949207544326782,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":129,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":130,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":131,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5835559368133545,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":132,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":133,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":134,
\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.3316842317581177\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5835559368133545,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":135,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":136,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":137,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t1.2279832363128662,\n\t\t\t\t0.480212926864624\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.16604459285736084,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":138,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":139,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":140,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.8109545707702637,\n\t\t\t\t0.257451593875885\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":141,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":142,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":143,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.1000568866729736,\n\t\t\t\t1.1368076801300049\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n
\t\t{\n\t\t\t\"bufferView\":144,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":145,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":146,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.7930243015289307,\n\t\t\t\t0.27076077461242676\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":147,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":148,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":149,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.2139461040496826,\n\t\t\t\t0.4390467405319214\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5002657175064087,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":150,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":151,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":152,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":153,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":154,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":155,\n\t\t\t\"componentType\"
:5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t1.9551841020584106,\n\t\t\t\t1.8975250720977783\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21470481157302856,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":156,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":157,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":158,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t1.880414366722107,\n\t\t\t\t0.8317139744758606\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.22660309076309204,\n\t\t\t\t0,\n\t\t\t\t-1.1682863235473633\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":159,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":160,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":161,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.7427144050598145,\n\t\t\t\t0.10247480869293213\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":162,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":163,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":164,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.7427144050598145,\n\t\t\t\t0.10247480869293213\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"buffe
rView\":165,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":166,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":167,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t1.449264407157898,\n\t\t\t\t0.38295137882232666\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0005335807800292969,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":168,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":169,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":170,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.6430703401565552,\n\t\t\t\t-4.227981499832091e-18,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":171,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":172,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":173,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":174,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":175,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":176,\n\t\t\t\"componentTyp
e\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"max\":[\n\t\t\t\t1.0095903873443604,\n\t\t\t\t2.4986674785614014,\n\t\t\t\t0.9999999403953552\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t0,\n\t\t\t\t-1.0000003576278687\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":177,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":178,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":44,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":179,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.4436837136745453,\n\t\t\t\t0.14453157782554626,\n\t\t\t\t0.00977183785289526\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.4436837136745453,\n\t\t\t\t0.08475638926029205,\n\t\t\t\t-0.013087860308587551\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":180,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":181,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":182,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":132,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":183,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"max\":[\n\t\t\t\t0.6080718636512756,\n\t\t\t\t0.12074020504951477,\n\t\t\t\t0.46730321645736694\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.770543098449707,\n\t\t\t\t-0.20597746968269348,\n\t\t\t\t-0.46730321645736694\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":184,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":185,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":186,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":264,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":187,\n\t\t\t\"componentType\":51
26,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.476053386926651,\n\t\t\t\t0.16016773879528046,\n\t\t\t\t0.0097717996686697\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.476053386926651,\n\t\t\t\t-0.21107666194438934,\n\t\t\t\t-0.025682564824819565\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":188,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":189,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":190,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3179,\n\t\t\t\"max\":[\n\t\t\t\t0.27941370010375977,\n\t\t\t\t0.01292315125465393,\n\t\t\t\t0.29084140062332153\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.27941370010375977,\n\t\t\t\t-1.0405468940734863,\n\t\t\t\t0.2718624770641327\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":191,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3179,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":192,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":5160,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":193,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"max\":[\n\t\t\t\t0.46760934591293335,\n\t\t\t\t0.5681414008140564,\n\t\t\t\t0.2960485517978668\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.46760934591293335,\n\t\t\t\t0.30794796347618103,\n\t\t\t\t0.273823618888855\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":194,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":195,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":196,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":264,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":197,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":384,\n\t\t\t\"max\":[\n\t\t\t\t0.467467725276947,\n\t\t\t\t0.2073485553264618,\n\t\
t\t\t0.2960485517978668\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.467467725276947,\n\t\t\t\t-0.836802065372467,\n\t\t\t\t0.273823618888855\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":198,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":199,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":200,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":528,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":201,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":64,\n\t\t\t\"max\":[\n\t\t\t\t0.5612022876739502,\n\t\t\t\t3.965745054301806e-05,\n\t\t\t\t0.3776973783969879\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5612024664878845,\n\t\t\t\t-0.015438186004757881,\n\t\t\t\t-0.35583171248435974\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":202,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":64,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":203,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":126,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":204,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.5526052713394165,\n\t\t\t\t0,\n\t\t\t\t0.3473944365978241\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5526053309440613,\n\t\t\t\t0,\n\t\t\t\t-0.34739458560943604\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":205,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":206,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t0.27151554822921753,\n\t\t\t\t0.0034180879592895508,\n\t\t\t\t0.0038742555771023035\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2715156078338623,\n\t\t\t\t0,\n\t\t\t\t-0.003874257206916809\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":207,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\
"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":208,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":209,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":643,\n\t\t\t\"max\":[\n\t\t\t\t0.21412484347820282,\n\t\t\t\t0.0617770254611969,\n\t\t\t\t0.20867447555065155\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21678970754146576,\n\t\t\t\t-0.060674868524074554,\n\t\t\t\t-0.20884692668914795\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":210,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":643,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":211,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":643,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":212,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2517,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":213,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1100,\n\t\t\t\"max\":[\n\t\t\t\t0.5314524173736572,\n\t\t\t\t0.28472211956977844,\n\t\t\t\t0.427214652299881\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5333153009414673,\n\t\t\t\t-0.2524959444999695,\n\t\t\t\t-0.49720922112464905\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":214,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1100,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":215,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6528,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":216,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4148,\n\t\t\t\"max\":[\n\t\t\t\t0.6636195778846741,\n\t\t\t\t0.1477474719285965,\n\t\t\t\t0.42036381363868713\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.003775038057938218,\n\t\t\t\t-0.07614853233098984,\n\t\t\t\t-0.42036381363868713\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":217,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4148,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":218,\n\t\t\t\"componentType\":5123,\n
\t\t\t\"count\":24384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":219,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3734,\n\t\t\t\"max\":[\n\t\t\t\t0.5434067845344543,\n\t\t\t\t0.7616413831710815,\n\t\t\t\t0.6681085228919983\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5434136986732483,\n\t\t\t\t3.0291071198007558e-06,\n\t\t\t\t-0.6681085228919983\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":220,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3734,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":221,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":22272,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":222,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":651,\n\t\t\t\"max\":[\n\t\t\t\t0.21286319196224213,\n\t\t\t\t0.05543559044599533,\n\t\t\t\t0.21552662551403046\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21678972244262695,\n\t\t\t\t-0.06069469451904297,\n\t\t\t\t-0.20884694159030914\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":223,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":651,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":224,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":651,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":225,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2517,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":226,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1291,\n\t\t\t\"max\":[\n\t\t\t\t0.2711111903190613,\n\t\t\t\t0.20877385139465332,\n\t\t\t\t0.27094924449920654\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.27347132563591003,\n\t\t\t\t-0.20877386629581451,\n\t\t\t\t-0.27096250653266907\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":227,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1291,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":228,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1291,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":
229,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6756,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":230,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.4989388585090637,\n\t\t\t\t0.04189485311508179,\n\t\t\t\t0.012411701492965221\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.16537515819072723,\n\t\t\t\t-0.04615183174610138,\n\t\t\t\t-0.012411782518029213\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":231,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":232,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":384,\n\t\t\t\"max\":[\n\t\t\t\t0.4989388883113861,\n\t\t\t\t0.15458370745182037,\n\t\t\t\t0.983651340007782\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.16847984492778778,\n\t\t\t\t-0.1685476452112198,\n\t\t\t\t-0.012411730363965034\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":233,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":234,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":528,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":235,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":208,\n\t\t\t\"max\":[\n\t\t\t\t0.5214969515800476,\n\t\t\t\t0.5009514689445496,\n\t\t\t\t0.1687571406364441\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.19113034009933472,\n\t\t\t\t-0.935589075088501,\n\t\t\t\t-0.1687571257352829\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":236,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":208,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":237,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":324,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":238,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":306,\n\t\t\t\"max\":[\n\t\t\t\t0.045450564473867416,\n\t\t\t\t0.3660617172718048,\n\t\t\t\t0.29084140062332153\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.045450564473867
416,\n\t\t\t\t0.351283460855484,\n\t\t\t\t0.2718624770641327\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":239,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":306,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":240,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":516,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":241,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.2637956142425537,\n\t\t\t\t0.4708976447582245,\n\t\t\t\t0.2960485517978668\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2637956142425537,\n\t\t\t\t0.2577918767929077,\n\t\t\t\t0.273823618888855\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":242,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":243,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":244,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":132,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":245,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2781,\n\t\t\t\"max\":[\n\t\t\t\t2.504610300064087,\n\t\t\t\t1.0724855661392212,\n\t\t\t\t0.42410728335380554\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-2.504610300064087,\n\t\t\t\t-1.0724855661392212,\n\t\t\t\t-0.42410728335380554\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":246,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2781,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":247,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":13440,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":248,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1248,\n\t\t\t\"max\":[\n\t\t\t\t0.08179128170013428,\n\t\t\t\t0.0073179202154278755,\n\t\t\t\t0.04479164630174637\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08179129660129547,\n\t\t\t\t-0.0073179202154278755,\n\t\t\t\t-0.04479164630174637\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\
t\t},\n\t\t{\n\t\t\t\"bufferView\":249,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1248,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":250,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3996,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":251,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":228,\n\t\t\t\"max\":[\n\t\t\t\t0.8769688606262207,\n\t\t\t\t-9.049092031254702e-11,\n\t\t\t\t0.47029218077659607\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.37035518884658813,\n\t\t\t\t-0.018523098900914192,\n\t\t\t\t-0.5797899961471558\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":252,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":228,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":253,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":254,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.8473078608512878,\n\t\t\t\t-0.018522948026657104,\n\t\t\t\t0.4431114196777344\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.34069839119911194,\n\t\t\t\t-0.018523098900914192,\n\t\t\t\t-0.5525795221328735\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":255,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":256,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":257,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.191134512424469,\n\t\t\t\t-0.010391523130238056,\n\t\t\t\t0.25108322501182556\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.1911345273256302,\n\t\t\t\t-0.01039156224578619,\n\t\t\t\t-0.31312862038612366\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":258,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":259,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t
\"max\":[\n\t\t\t\t0.19238071143627167,\n\t\t\t\t-0.010459273122251034,\n\t\t\t\t0.25100603699684143\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.19238072633743286,\n\t\t\t\t-0.010459313169121742,\n\t\t\t\t-0.31301558017730713\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":260,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":261,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":262,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"max\":[\n\t\t\t\t0.22301608324050903,\n\t\t\t\t-1.1375549547665287e-05,\n\t\t\t\t0.28319498896598816\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.22301611304283142,\n\t\t\t\t-0.011142595671117306,\n\t\t\t\t-0.3491310477256775\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":263,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":264,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":265,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"max\":[\n\t\t\t\t0.009505322203040123,\n\t\t\t\t-0.300081729888916,\n\t\t\t\t0.009544174186885357\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.009505323134362698,\n\t\t\t\t-0.3362524211406708,\n\t\t\t\t-0.009544174186885357\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":266,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":267,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1440,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":268,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"max\":[\n\t\t\t\t0.0007438515312969685,\n\t\t\t\t-0.00010429711983306333,\n\t\t\t\t0.0008589255739934742\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0007438514148816466,\n\t\t\t\t-0.31804805994033813,\n\t\t\t\
t-0.0008589255739934742\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":269,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":270,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":271,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t0.018276110291481018,\n\t\t\t\t3.0816596496840987e-10,\n\t\t\t\t0.011351683177053928\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.018276114016771317,\n\t\t\t\t-0.004464164841920137,\n\t\t\t\t-0.011873414739966393\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":272,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":273,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":84,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":274,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":88,\n\t\t\t\"max\":[\n\t\t\t\t0.30904194712638855,\n\t\t\t\t-0.09976454079151154,\n\t\t\t\t0.1980161815881729\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.30904194712638855,\n\t\t\t\t-0.15826284885406494,\n\t\t\t\t0.16872520744800568\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":275,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":88,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":276,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":168,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":277,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":682,\n\t\t\t\"max\":[\n\t\t\t\t0.023798061534762383,\n\t\t\t\t0.29990243911743164,\n\t\t\t\t0.02172921784222126\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02436229959130287,\n\t\t\t\t-0.29990243911743164,\n\t\t\t\t-0.024080166593194008\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":278,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":682,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"buff
erView\":279,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":280,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1116,\n\t\t\t\"max\":[\n\t\t\t\t0.002438030205667019,\n\t\t\t\t0.2740495800971985,\n\t\t\t\t0.2786678671836853\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0008455177885480225,\n\t\t\t\t-0.2740495800971985,\n\t\t\t\t0.23602034151554108\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":281,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1116,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":282,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1800,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":283,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2866,\n\t\t\t\"max\":[\n\t\t\t\t0.004097497556358576,\n\t\t\t\t0.2889848053455353,\n\t\t\t\t0.34853753447532654\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0004354871343821287,\n\t\t\t\t-0.2889847755432129,\n\t\t\t\t-0.0017901991959661245\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":284,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2866,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":285,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4416,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":286,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":194,\n\t\t\t\"max\":[\n\t\t\t\t0.028982071205973625,\n\t\t\t\t0.28915202617645264,\n\t\t\t\t0.03211777284741402\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02898208610713482,\n\t\t\t\t-0.28915202617645264,\n\t\t\t\t-0.03211776167154312\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":287,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":194,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":288,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":768,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":289,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":194,\n\t\t\t\"m
ax\":[\n\t\t\t\t0.028982071205973625,\n\t\t\t\t0.28915202617645264,\n\t\t\t\t0.03211777284741402\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02898208610713482,\n\t\t\t\t-0.28915202617645264,\n\t\t\t\t-0.03211776167154312\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":290,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":194,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":291,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2908,\n\t\t\t\"max\":[\n\t\t\t\t0.004097497556358576,\n\t\t\t\t0.2889848053455353,\n\t\t\t\t0.3711926341056824\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0004354871343821287,\n\t\t\t\t-0.2889847755432129,\n\t\t\t\t-0.0017901991959661245\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":292,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2908,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":293,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4416,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":294,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1116,\n\t\t\t\"max\":[\n\t\t\t\t0.002438030205667019,\n\t\t\t\t0.2740495800971985,\n\t\t\t\t0.2786678671836853\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0008455177885480225,\n\t\t\t\t-0.2740495800971985,\n\t\t\t\t0.23602034151554108\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":295,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1116,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":296,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":682,\n\t\t\t\"max\":[\n\t\t\t\t0.023798061534762383,\n\t\t\t\t0.29990243911743164,\n\t\t\t\t0.02172921784222126\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02436229959130287,\n\t\t\t\t-0.29990243911743164,\n\t\t\t\t-0.024080166593194008\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":297,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":682,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":298,\n\t\t\t\"componentType\":5126,\n
\t\t\t\"count\":88,\n\t\t\t\"max\":[\n\t\t\t\t0.30904194712638855,\n\t\t\t\t-0.09976454079151154,\n\t\t\t\t0.1980161815881729\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.30904194712638855,\n\t\t\t\t-0.15826284885406494,\n\t\t\t\t0.16872520744800568\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":299,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":88,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":300,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t0.018276110291481018,\n\t\t\t\t3.0816596496840987e-10,\n\t\t\t\t0.011351683177053928\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.018276114016771317,\n\t\t\t\t-0.004464164841920137,\n\t\t\t\t-0.011873414739966393\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":301,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":302,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"max\":[\n\t\t\t\t0.0007438513566739857,\n\t\t\t\t-0.00010429711983306333,\n\t\t\t\t0.0008589253993704915\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0007438512402586639,\n\t\t\t\t-0.31804805994033813,\n\t\t\t\t-0.0008589253993704915\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":303,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":304,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"max\":[\n\t\t\t\t0.009505320340394974,\n\t\t\t\t-0.300081729888916,\n\t\t\t\t0.009544173255562782\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.009505320340394974,\n\t\t\t\t-0.3362524211406708,\n\t\t\t\t-0.009544172324240208\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":305,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":306,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"max\":[\n\t\t\t\t0.009505320340394974,\n\t\t\t\t-0.3000
81729888916,\n\t\t\t\t0.009544173255562782\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.009505322203040123,\n\t\t\t\t-0.3362524211406708,\n\t\t\t\t-0.009544172324240208\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":307,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":252,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":308,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"max\":[\n\t\t\t\t0.0007438514148816466,\n\t\t\t\t-0.00010429711983306333,\n\t\t\t\t0.0008589254575781524\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0007438512984663248,\n\t\t\t\t-0.31804805994033813,\n\t\t\t\t-0.0008589254575781524\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":309,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":310,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t0.01827610842883587,\n\t\t\t\t3.0816593721283425e-10,\n\t\t\t\t0.011351683177053928\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.018276112154126167,\n\t\t\t\t-0.00446416437625885,\n\t\t\t\t-0.011873414739966393\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":311,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":312,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":88,\n\t\t\t\"max\":[\n\t\t\t\t0.4316304624080658,\n\t\t\t\t-0.0997641459107399,\n\t\t\t\t0.1980161815881729\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.4316304624080658,\n\t\t\t\t-0.1582624465227127,\n\t\t\t\t0.16872520744800568\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":313,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":88,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":314,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":692,\n\t\t\t\"max\":[\n\t\t\t\t0.02408015914261341,\n\t\t\t\t0.42319056391716003,\n\t\t\t\t0.021729225292801857\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.024080200
120806694,\n\t\t\t\t-0.42319056391716003,\n\t\t\t\t-0.02408016100525856\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":315,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":692,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":316,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":317,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1092,\n\t\t\t\"max\":[\n\t\t\t\t0.0023479796946048737,\n\t\t\t\t0.4123048782348633,\n\t\t\t\t0.2786678075790405\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0008455414208583534,\n\t\t\t\t-0.4123048782348633,\n\t\t\t\t0.23601903021335602\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":318,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1092,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":319,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1800,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":320,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2721,\n\t\t\t\"max\":[\n\t\t\t\t0.003890065010637045,\n\t\t\t\t0.4122636616230011,\n\t\t\t\t0.2789841592311859\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.00043549638940021396,\n\t\t\t\t-0.4122636914253235,\n\t\t\t\t0.0001247574109584093\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":321,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2721,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":322,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4416,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":323,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":288,\n\t\t\t\"max\":[\n\t\t\t\t0.028982071205973625,\n\t\t\t\t0.41272681951522827,\n\t\t\t\t0.02898208424448967\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02898208238184452,\n\t\t\t\t-0.41272681951522827,\n\t\t\t\t-0.028982071205973625\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":324,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2
88,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":325,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1536,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":326,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1506,\n\t\t\t\"max\":[\n\t\t\t\t0.5433828830718994,\n\t\t\t\t0.7616105079650879,\n\t\t\t\t0.668077826499939\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5433710217475891,\n\t\t\t\t-2.7697407858795486e-05,\n\t\t\t\t-0.668077826499939\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":327,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1506,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":328,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":8904,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":329,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2052,\n\t\t\t\"max\":[\n\t\t\t\t0.6636162996292114,\n\t\t\t\t0.1477474421262741,\n\t\t\t\t0.4203604757785797\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.0037609413266181946,\n\t\t\t\t-0.07614852488040924,\n\t\t\t\t-0.4203604757785797\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":330,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2052,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":331,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":12192,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":332,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2211,\n\t\t\t\"max\":[\n\t\t\t\t0.531452476978302,\n\t\t\t\t0.28472208976745605,\n\t\t\t\t0.427214652299881\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5333153009414673,\n\t\t\t\t-0.25249597430229187,\n\t\t\t\t-0.49720922112464905\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":333,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2211,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":334,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":13056,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":335,\n\t\t\t\"compone
ntType\":5126,\n\t\t\t\"count\":1502,\n\t\t\t\"max\":[\n\t\t\t\t0.5433831214904785,\n\t\t\t\t0.7671076059341431,\n\t\t\t\t1.1618307828903198\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5433714985847473,\n\t\t\t\t2.4727849449845962e-05,\n\t\t\t\t-1.16182279586792\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":336,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1502,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":337,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":8904,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":338,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4158,\n\t\t\t\"max\":[\n\t\t\t\t0.6720637083053589,\n\t\t\t\t0.1477474868297577,\n\t\t\t\t0.9239981770515442\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.0037750855553895235,\n\t\t\t\t-0.07614850252866745,\n\t\t\t\t-0.9239981770515442\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":339,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4158,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":340,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":24384,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":341,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1320,\n\t\t\t\"max\":[\n\t\t\t\t0.5314525961875916,\n\t\t\t\t0.2846877872943878,\n\t\t\t\t0.46221214532852173\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5332691669464111,\n\t\t\t\t-0.2524959444999695,\n\t\t\t\t-1.3866393566131592\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":342,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1320,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":343,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":7830,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":344,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":640,\n\t\t\t\"max\":[\n\t\t\t\t0.21412476897239685,\n\t\t\t\t0.055548928678035736,\n\t\t\t\t0.21552662551403046\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.21678969264030457,\n\t\t\t\t-
0.06069469079375267,\n\t\t\t\t-0.20884692668914795\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":345,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":640,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":346,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":640,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":347,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2517,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":348,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":470,\n\t\t\t\"max\":[\n\t\t\t\t0.8406214714050293,\n\t\t\t\t0.014209745451807976,\n\t\t\t\t0.5578946471214294\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.8413571119308472,\n\t\t\t\t2.396662239334546e-05,\n\t\t\t\t-0.554294228553772\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":349,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":470,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":350,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":470,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":351,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2064,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":352,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"max\":[\n\t\t\t\t0.2637956142425537,\n\t\t\t\t0.4708976447582245,\n\t\t\t\t0.2960485517978668\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2637956142425537,\n\t\t\t\t0.2577918767929077,\n\t\t\t\t0.273823618888855\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":353,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":354,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":96,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":355,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":306,\n\t\t\t\"max\":[\n\t\t\t\t0.045450564473867416,\n\t\t\t\t0.3660617172718048,\n\t\t\t\t0.29084140062332153\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0454505644738
67416,\n\t\t\t\t0.351283460855484,\n\t\t\t\t0.2718624770641327\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":356,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":306,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":357,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":516,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":358,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2652,\n\t\t\t\"max\":[\n\t\t\t\t0.2668611407279968,\n\t\t\t\t0.24918457865715027,\n\t\t\t\t0.6201762557029724\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2668611407279968,\n\t\t\t\t-0.010967996902763844,\n\t\t\t\t-0.6201762557029724\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":359,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2652,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":360,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2652,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":361,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":8139,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":362,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"max\":[\n\t\t\t\t0.2878775894641876,\n\t\t\t\t0.47464823722839355,\n\t\t\t\t0.29901182651519775\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2878775894641876,\n\t\t\t\t0.25646138191223145,\n\t\t\t\t-0.2990120053291321\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":363,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":364,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":48,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":365,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":72,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":366,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":672,\n\t\t\t\"max\":[\n\t\t\t\t0.024215681478381157,\n\t\t\t\t2.5066010493901558e-05,\n\t\t\t\t0.024479782208800316\n\t\t\t],\n\t\t\t\"min\":[\
n\t\t\t\t-0.024215679615736008,\n\t\t\t\t-0.0065596685744822025,\n\t\t\t\t-0.02395157888531685\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":367,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":672,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":368,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3840,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":369,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":672,\n\t\t\t\"max\":[\n\t\t\t\t0.024215681478381157,\n\t\t\t\t2.5066010493901558e-05,\n\t\t\t\t0.024479782208800316\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.024215679615736008,\n\t\t\t\t-0.0065596685744822025,\n\t\t\t\t-0.02395157888531685\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":370,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":672,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":371,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1696,\n\t\t\t\"max\":[\n\t\t\t\t0.028645366430282593,\n\t\t\t\t-8.73478566063568e-05,\n\t\t\t\t0.028645364567637444\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.028645364567637444,\n\t\t\t\t-0.013904731720685959,\n\t\t\t\t-0.028645362704992294\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":372,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1696,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":373,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":9984,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":374,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":243,\n\t\t\t\"max\":[\n\t\t\t\t0.013928772881627083,\n\t\t\t\t-0.004239760804921389,\n\t\t\t\t0.013928774744272232\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.013928775675594807,\n\t\t\t\t-0.009752316400408745,\n\t\t\t\t-0.013928773812949657\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":375,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":243,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":376,\n\t\t\t\"comp
onentType\":5123,\n\t\t\t\"count\":1152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":377,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3664,\n\t\t\t\"max\":[\n\t\t\t\t0.02991061471402645,\n\t\t\t\t0.015455699525773525,\n\t\t\t\t0.5116071105003357\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0299106165766716,\n\t\t\t\t-3.0331082598422654e-05,\n\t\t\t\t-0.02991061471402645\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":378,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3664,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":379,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":19776,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":380,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4584,\n\t\t\t\"max\":[\n\t\t\t\t0.02991061471402645,\n\t\t\t\t0.018386347219347954,\n\t\t\t\t0.5116071105003357\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0299106165766716,\n\t\t\t\t-3.0331082598422654e-05,\n\t\t\t\t-0.02991061471402645\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":381,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4584,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":382,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":25152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":383,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2075,\n\t\t\t\"max\":[\n\t\t\t\t0.05743894353508949,\n\t\t\t\t0.03309363126754761,\n\t\t\t\t0.3695560693740845\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.057435255497694016,\n\t\t\t\t-0.03309362754225731,\n\t\t\t\t-0.30479103326797485\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":384,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2075,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":385,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":11784,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":386,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3063,\n\t\t\t\"max\":[\n\t\t\t\t0.0
5743894353508949,\n\t\t\t\t0.03304240480065346,\n\t\t\t\t0.3695560693740845\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.057435255497694016,\n\t\t\t\t-0.033042408525943756,\n\t\t\t\t-0.30486488342285156\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":387,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":3063,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":388,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":17682,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":389,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":5205,\n\t\t\t\"max\":[\n\t\t\t\t0.057455360889434814,\n\t\t\t\t0.03329318016767502,\n\t\t\t\t0.30475133657455444\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.05747444927692413,\n\t\t\t\t-0.825806736946106,\n\t\t\t\t-0.30475133657455444\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":390,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":5205,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":391,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":27954,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":392,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":387,\n\t\t\t\"max\":[\n\t\t\t\t0.02317667007446289,\n\t\t\t\t0.017622478306293488,\n\t\t\t\t0.036703143268823624\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.023189472034573555,\n\t\t\t\t-0.02382655441761017,\n\t\t\t\t-0.023113075643777847\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":393,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":387,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":394,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2160,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":395,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":5368,\n\t\t\t\"max\":[\n\t\t\t\t0.019682668149471283,\n\t\t\t\t0.0077690789476037025,\n\t\t\t\t0.019682668149471283\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.019682668149471283,\n\t\t\t\t-0.21444717049598694,\n\t\t\t\t-0.0196826681494
71283\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":396,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":5368,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":397,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":30528,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":398,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":761,\n\t\t\t\"max\":[\n\t\t\t\t0.015527750365436077,\n\t\t\t\t-0.06980611383914948,\n\t\t\t\t0.0072495341300964355\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.015527751296758652,\n\t\t\t\t-0.21446961164474487,\n\t\t\t\t-0.04016909748315811\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":399,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":761,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":400,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4374,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":401,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2323,\n\t\t\t\"max\":[\n\t\t\t\t0.06420924514532089,\n\t\t\t\t0.05869099125266075,\n\t\t\t\t0.044763900339603424\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.044763900339603424,\n\t\t\t\t3.658107380033471e-05,\n\t\t\t\t-0.04476391151547432\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":402,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2323,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":403,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":13650,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":404,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2370,\n\t\t\t\"max\":[\n\t\t\t\t0.0691460371017456,\n\t\t\t\t0.011226482689380646,\n\t\t\t\t0.0691460445523262\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.0691460520029068,\n\t\t\t\t0.00014830658619757742,\n\t\t\t\t-0.0691460371017456\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":405,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2370,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferV
iew\":406,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":14208,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":407,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":984,\n\t\t\t\"max\":[\n\t\t\t\t0.045120880007743835,\n\t\t\t\t0.01545932050794363,\n\t\t\t\t0.011687418445944786\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08727649599313736,\n\t\t\t\t-1.3494890481524635e-05,\n\t\t\t\t-0.011687335558235645\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":408,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":984,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":409,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":5892,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":410,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2919,\n\t\t\t\"max\":[\n\t\t\t\t0.11718066036701202,\n\t\t\t\t0.13938021659851074,\n\t\t\t\t0.07898714393377304\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.14315040409564972,\n\t\t\t\t6.240474613150582e-05,\n\t\t\t\t-0.07898714393377304\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":411,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2919,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":412,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":17451,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":413,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4833,\n\t\t\t\"max\":[\n\t\t\t\t0.04082781821489334,\n\t\t\t\t0.16709978878498077,\n\t\t\t\t0.04082781821489334\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.04082781821489334,\n\t\t\t\t0.12753257155418396,\n\t\t\t\t-0.04082782566547394\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":414,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4833,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":415,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":28800,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":416,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1951,\n\t\t
\t\"max\":[\n\t\t\t\t1.2633671760559082,\n\t\t\t\t7.536155699483515e-09,\n\t\t\t\t0.9429534077644348\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.2633671760559082,\n\t\t\t\t-0.03758739307522774,\n\t\t\t\t-0.9311918020248413\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":417,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1951,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":418,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":10560,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":419,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2892,\n\t\t\t\"max\":[\n\t\t\t\t0.5521746277809143,\n\t\t\t\t0.015419553965330124,\n\t\t\t\t0.6326165199279785\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.49043479561805725,\n\t\t\t\t-0.015419621020555496,\n\t\t\t\t-0.6613004803657532\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":420,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2892,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":421,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2892,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":422,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":12906,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":423,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":458,\n\t\t\t\"max\":[\n\t\t\t\t0.9241786599159241,\n\t\t\t\t0.958294689655304,\n\t\t\t\t0.9241788387298584\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9241786599159241,\n\t\t\t\t0.898910641670227,\n\t\t\t\t-0.9241783022880554\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":424,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":458,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":425,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":458,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":426,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2304,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":427,\n\t\t\t\"componentType\":5126,
\n\t\t\t\"count\":192,\n\t\t\t\"max\":[\n\t\t\t\t0.5798277258872986,\n\t\t\t\t0.8989107608795166,\n\t\t\t\t0.579828679561615\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.5798280239105225,\n\t\t\t\t0.8822780251502991,\n\t\t\t\t-0.5798268914222717\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":428,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":429,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":192,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":430,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":768,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":431,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":448,\n\t\t\t\"max\":[\n\t\t\t\t0.7299965620040894,\n\t\t\t\t-0.7179452776908875,\n\t\t\t\t13.385577201843262\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.7299966216087341,\n\t\t\t\t-0.7817333340644836,\n\t\t\t\t-13.38558292388916\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":432,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":448,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":433,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2304,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":434,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":513,\n\t\t\t\"max\":[\n\t\t\t\t0.7297933101654053,\n\t\t\t\t0.040191370993852615,\n\t\t\t\t13.381857872009277\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.7297933101654053,\n\t\t\t\t-0.054171591997146606,\n\t\t\t\t-13.381854057312012\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":435,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":513,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":436,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2880,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":437,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":772,\n\t\t\t\"max\":[\n\t\t\t\t1.0000001192092896,\n\t\t\t\t1.0000001192092896,\n\t\t
\t\t1.2264912128448486\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-52.15318298339844,\n\t\t\t\t-1.0000001192092896,\n\t\t\t\t-1.3088507652282715\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":438,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":772,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":439,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4608,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":440,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1896,\n\t\t\t\"max\":[\n\t\t\t\t1.6478947401046753,\n\t\t\t\t0.03785727918148041,\n\t\t\t\t19.271907806396484\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.6527422666549683,\n\t\t\t\t-2.6613372028805315e-05,\n\t\t\t\t-25.50284194946289\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":441,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1896,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":442,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":9729,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":443,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":424,\n\t\t\t\"max\":[\n\t\t\t\t5.885627746582031,\n\t\t\t\t0.504646897315979,\n\t\t\t\t2.0028798580169678\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-5.885629653930664,\n\t\t\t\t-0.5046470165252686,\n\t\t\t\t-1.1367090940475464\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":444,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":424,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":445,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2496,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":446,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":110,\n\t\t\t\"max\":[\n\t\t\t\t1.2648602724075317,\n\t\t\t\t9.051706051366182e-09,\n\t\t\t\t0.9511604309082031\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.2648602724075317,\n\t\t\t\t-8.854934563373718e-09,\n\t\t\t\t-0.9311917424201965\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":447,\
n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":110,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":448,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":540,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":449,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":353,\n\t\t\t\"max\":[\n\t\t\t\t0.12745577096939087,\n\t\t\t\t0.08690422028303146,\n\t\t\t\t2.3370938301086426\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.12745580077171326,\n\t\t\t\t-0.15926377475261688,\n\t\t\t\t-2.3370931148529053\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":450,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":353,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":451,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":2064,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":452,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":876,\n\t\t\t\"max\":[\n\t\t\t\t1.2632966041564941,\n\t\t\t\t7.536156587661935e-09,\n\t\t\t\t0.9429534673690796\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.2632967233657837,\n\t\t\t\t-0.5335023999214172,\n\t\t\t\t-0.9311918020248413\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":453,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":876,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":454,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":4752,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":455,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2560,\n\t\t\t\"max\":[\n\t\t\t\t1.2643764019012451,\n\t\t\t\t0.2753285765647888,\n\t\t\t\t0.9475919008255005\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.2643764019012451,\n\t\t\t\t0.2506295144557953,\n\t\t\t\t-0.9368728995323181\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":456,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2560,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":457,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":15360,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\
n\t\t\t\"bufferView\":458,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":640,\n\t\t\t\"max\":[\n\t\t\t\t1.6644946336746216,\n\t\t\t\t-0.32771193981170654,\n\t\t\t\t22.462453842163086\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.6644943952560425,\n\t\t\t\t-0.3600073456764221,\n\t\t\t\t-22.719451904296875\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":459,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":640,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":460,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3840,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":461,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":650,\n\t\t\t\"max\":[\n\t\t\t\t1.2243093252182007,\n\t\t\t\t4.99021179933834e-09,\n\t\t\t\t2.2311463356018066\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.2243093252182007,\n\t\t\t\t-0.42578771710395813,\n\t\t\t\t-0.677730917930603\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":462,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":650,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":463,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3888,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":464,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"max\":[\n\t\t\t\t0.08039847761392593,\n\t\t\t\t-0.004371070768684149,\n\t\t\t\t0.10489880293607712\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08039848506450653,\n\t\t\t\t-0.0043710870668292046,\n\t\t\t\t-0.13081341981887817\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":465,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":466,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":467,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":300,\n\t\t\t\"max\":[\n\t\t\t\t0.006832329090684652,\n\t\t\t\t-7.933522283565253e-06,\n\t\t\t\t0.006860257126390934\n\t\t\t],\n\t\t\t\"min\":[\n
\t\t\t\t-0.0068323309533298016,\n\t\t\t\t-0.02199321798980236,\n\t\t\t\t-0.006860255263745785\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":468,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":300,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":469,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1728,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":470,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":700,\n\t\t\t\"max\":[\n\t\t\t\t0.18859754502773285,\n\t\t\t\t0.057214487344026566,\n\t\t\t\t0.008845220319926739\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.18859755992889404,\n\t\t\t\t-0.0034839953295886517,\n\t\t\t\t-0.054255563765764236\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":471,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":700,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":472,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3972,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":473,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":317,\n\t\t\t\"max\":[\n\t\t\t\t0.19292476773262024,\n\t\t\t\t0.23947319388389587,\n\t\t\t\t0.009282649494707584\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.19292476773262024,\n\t\t\t\t-0.005541855935007334,\n\t\t\t\t-0.002669593319296837\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":474,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":317,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":475,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1554,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":476,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":4080,\n\t\t\t\"max\":[\n\t\t\t\t0.017140550538897514,\n\t\t\t\t0.03083072043955326,\n\t\t\t\t0.1839798539876938\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.0014622606104239821,\n\t\t\t\t-0.0016518529737368226,\n\t\t\t\t-0.1839798539876938\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":477,\n\t\t\t\"componentType\"
:5126,\n\t\t\t\"count\":4080,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":478,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":24192,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":479,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":116,\n\t\t\t\"max\":[\n\t\t\t\t0.01459871418774128,\n\t\t\t\t-8.859956142259762e-05,\n\t\t\t\t0.1814749836921692\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t0.004253401421010494,\n\t\t\t\t-0.11362061649560928,\n\t\t\t\t-0.18147499859333038\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":480,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":116,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":481,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":672,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":482,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":701,\n\t\t\t\"max\":[\n\t\t\t\t0.19292476773262024,\n\t\t\t\t0.059158556163311005,\n\t\t\t\t0.009282649494707584\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.19292476773262024,\n\t\t\t\t-0.005541855935007334,\n\t\t\t\t-0.009235925041139126\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":483,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":701,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":484,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3972,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":485,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1044,\n\t\t\t\"max\":[\n\t\t\t\t0.18989714980125427,\n\t\t\t\t0.05946918576955795,\n\t\t\t\t0.11615249514579773\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.18989714980125427,\n\t\t\t\t9.935706657415722e-06,\n\t\t\t\t-0.005730097647756338\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":486,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1044,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":487,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":6216,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t
\t\t\"bufferView\":488,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":241,\n\t\t\t\"max\":[\n\t\t\t\t0.2911272943019867,\n\t\t\t\t2.181886316066084e-08,\n\t\t\t\t0.6213991641998291\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.29112735390663147,\n\t\t\t\t-1.1645550301864205e-08,\n\t\t\t\t-0.40438249707221985\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":489,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":241,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":490,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1152,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":491,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2522,\n\t\t\t\"max\":[\n\t\t\t\t0.26504117250442505,\n\t\t\t\t0.1980399191379547,\n\t\t\t\t0.5879448056221008\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.2650412321090698,\n\t\t\t\t-0.018674420192837715,\n\t\t\t\t-0.2089131623506546\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":492,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2522,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":493,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":14016,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":494,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1480,\n\t\t\t\"max\":[\n\t\t\t\t0.026884857565164566,\n\t\t\t\t0.4139226973056793,\n\t\t\t\t0.14664645493030548\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.02788403630256653,\n\t\t\t\t-0.41392260789871216,\n\t\t\t\t-0.037943124771118164\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":495,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":1480,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":496,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":8712,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":497,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"max\":[\n\t\t\t\t0.3310958445072174,\n\t\t\t\t1.1071453094482422,\n\t\t\t\t0.5224316716194153\n\t\t\t],\n\t\t\t\"mi
n\":[\n\t\t\t\t-0.08596266806125641,\n\t\t\t\t1.097179651260376,\n\t\t\t\t-0.5224316716194153\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":498,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":12,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":499,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":60,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":500,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":128,\n\t\t\t\"max\":[\n\t\t\t\t0.3586115539073944,\n\t\t\t\t1.1417394876480103,\n\t\t\t\t0.5487160086631775\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.38212496042251587,\n\t\t\t\t1.087152361869812,\n\t\t\t\t-0.5487160086631775\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":501,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":128,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":502,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":228,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":503,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":361,\n\t\t\t\"max\":[\n\t\t\t\t0.13963429629802704,\n\t\t\t\t1.1167515516281128,\n\t\t\t\t0.5015658736228943\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08627023547887802,\n\t\t\t\t1.087152361869812,\n\t\t\t\t-0.5015658736228943\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":504,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":361,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":505,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":516,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":506,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":554,\n\t\t\t\"max\":[\n\t\t\t\t0.05097965896129608,\n\t\t\t\t1.18086576461792,\n\t\t\t\t0.46035489439964294\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.08596266806125641,\n\t\t\t\t3.5204434175760066e-14,\n\t\t\t\t-0.46035489439964294\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":507,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":554,\n
\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":508,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":972,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":509,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"max\":[\n\t\t\t\t0.9999998807907104,\n\t\t\t\t1,\n\t\t\t\t0.9999997615814209\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999997019767761,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":510,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":511,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":512,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":24192,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":513,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"max\":[\n\t\t\t\t0.09569022059440613,\n\t\t\t\t0.10698522627353668,\n\t\t\t\t0.09144195914268494\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09569022804498672,\n\t\t\t\t-0.10698520392179489,\n\t\t\t\t-0.09144195914268494\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":514,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":515,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":516,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":87120,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":517,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"max\":[\n\t\t\t\t0.09569022059440613,\n\t\t\t\t0.10698522627353668,\n\t\t\t\t0.09144195914268494\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09569022804498672,\n\t\t\t\t-0.10698520392179489,\n\t\t\t\t-0.09144195914268494\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":518,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t
\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":519,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":520,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":87120,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":521,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999999403953552,\n\t\t\t\t-1,\n\t\t\t\t-0.9999999403953552\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":522,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":523,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":524,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":1440,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":525,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"max\":[\n\t\t\t\t0.09569022059440613,\n\t\t\t\t0.10698522627353668,\n\t\t\t\t0.09144195914268494\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09569022804498672,\n\t\t\t\t-0.10698520392179489,\n\t\t\t\t-0.09144195914268494\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":526,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":527,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":528,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"max\":[\n\t\t\t\t0.09569022059440613,\n\t\t\t\t0.10698522627353668,\n\t\t\t\t0.09144195914268494\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.09569022804498672,\n\t\t\t\t-0.10698520392179489,\n\t\t\t\t-0.09144195914268494\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":529,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\
t{\n\t\t\t\"bufferView\":530,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":58080,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":531,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999999403953552,\n\t\t\t\t-1,\n\t\t\t\t-0.9999999403953552\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":532,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":533,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":287,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":534,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"max\":[\n\t\t\t\t1.25,\n\t\t\t\t0.25,\n\t\t\t\t1.25\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.25,\n\t\t\t\t-0.25,\n\t\t\t\t-1.25\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":535,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":536,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":637,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":537,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3456,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":538,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2304,\n\t\t\t\"max\":[\n\t\t\t\t1.1200000047683716,\n\t\t\t\t0.11999999731779099,\n\t\t\t\t1.1200000047683716\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1.1200000047683716,\n\t\t\t\t-0.11999999731779099,\n\t\t\t\t-1.1200000047683716\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":539,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2304,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":540,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":2304,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":541,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":3456,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"
bufferView\":542,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":543,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":544,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":545,\n\t\t\t\"componentType\":5123,\n\t\t\t\"count\":36,\n\t\t\t\"type\":\"SCALAR\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":546,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":547,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":548,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":549,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":550,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":551,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":552,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":553,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":554,\n\t\t\t\"componentType\":5126,\n\t\t\t\"coun
t\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":555,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":556,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":557,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":558,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":559,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":560,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":561,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":562,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":563,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":564,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":565,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":566,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferV
iew\":567,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":568,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":569,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":570,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"max\":[\n\t\t\t\t1,\n\t\t\t\t1,\n\t\t\t\t1\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-1,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":571,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":572,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":24,\n\t\t\t\"type\":\"VEC2\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":573,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"max\":[\n\t\t\t\t0.9999998807907104,\n\t\t\t\t1,\n\t\t\t\t0.9999997615814209\n\t\t\t],\n\t\t\t\"min\":[\n\t\t\t\t-0.9999997019767761,\n\t\t\t\t-1,\n\t\t\t\t-1\n\t\t\t],\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":574,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"type\":\"VEC3\"\n\t\t},\n\t\t{\n\t\t\t\"bufferView\":575,\n\t\t\t\"componentType\":5126,\n\t\t\t\"count\":16256,\n\t\t\t\"type\":\"VEC2\"\n\t\t}\n\t],\n\t\"bufferViews\":[\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7776,\n\t\t\t\"byteOffset\":0,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7776,\n\t\t\t\"byteOffset\":7776,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1872,\n\t\t\t\"byteOffset\":15552,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":17424,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\
":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":18000,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":384,\n\t\t\t\"byteOffset\":18576,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":396,\n\t\t\t\"byteOffset\":18960,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":19356,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":19932,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":384,\n\t\t\t\"byteOffset\":20508,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":396,\n\t\t\t\"byteOffset\":20892,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":11400,\n\t\t\t\"byteOffset\":21288,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":11400,\n\t\t\t\"byteOffset\":32688,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3432,\n\t\t\t\"byteOffset\":44088,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28608,\n\t\t\t\"byteOffset\":47520,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28608,\n\t\t\t\"byteOffset\":76128,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7560,\n\t\t\t\"byteOffset\":104736,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22368,\n\t\t\t\"byteOffset\":112296,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22368,\n\t\t\t\"byteOffset\":134664,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7548,\n\t\t\t\"byteOffset\":157032,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":151680,\n\t\t\t\"byteOffset\":164580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\
n\t\t\t\"byteLength\":151680,\n\t\t\t\"byteOffset\":316260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":37320,\n\t\t\t\"byteOffset\":467940,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":151740,\n\t\t\t\"byteOffset\":505260,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":151740,\n\t\t\t\"byteOffset\":657000,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":37320,\n\t\t\t\"byteOffset\":808740,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":151944,\n\t\t\t\"byteOffset\":846060,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":151944,\n\t\t\t\"byteOffset\":998004,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":37320,\n\t\t\t\"byteOffset\":1149948,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1187268,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1187316,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byteOffset\":1187364,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1187376,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1187424,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4488,\n\t\t\t\"byteOffset\":1187472,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4488,\n\t\t\t\"byteOffset\":1191960,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4296,\n\t\t\t\"byteOffset\":1196448,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6648,\n\t\t\t\"byteOffset\":1200744,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\
t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6648,\n\t\t\t\"byteOffset\":1207392,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6624,\n\t\t\t\"byteOffset\":1214040,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7224,\n\t\t\t\"byteOffset\":1220664,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7224,\n\t\t\t\"byteOffset\":1227888,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7200,\n\t\t\t\"byteOffset\":1235112,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7800,\n\t\t\t\"byteOffset\":1242312,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7800,\n\t\t\t\"byteOffset\":1250112,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7776,\n\t\t\t\"byteOffset\":1257912,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1248,\n\t\t\t\"byteOffset\":1265688,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1248,\n\t\t\t\"byteOffset\":1266936,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":832,\n\t\t\t\"byteOffset\":1268184,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":456,\n\t\t\t\"byteOffset\":1269016,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5508,\n\t\t\t\"byteOffset\":1269472,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5508,\n\t\t\t\"byteOffset\":1274980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3672,\n\t\t\t\"byteOffset\":1280488,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":1284160,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":25368,\n\t\t\t\"byteOffset\":1288768,\n\t\t\t\"target\":3
4962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":25368,\n\t\t\t\"byteOffset\":1314136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":25344,\n\t\t\t\"byteOffset\":1339504,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":216,\n\t\t\t\"byteOffset\":1364848,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":216,\n\t\t\t\"byteOffset\":1365064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":96,\n\t\t\t\"byteOffset\":1365280,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1365376,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1366528,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":264,\n\t\t\t\"byteOffset\":1367680,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1367944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1369096,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1370248,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1371400,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":264,\n\t\t\t\"byteOffset\":1372552,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1372816,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1373968,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1375120,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1376272,\
n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":264,\n\t\t\t\"byteOffset\":1377424,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9480,\n\t\t\t\"byteOffset\":1377688,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9480,\n\t\t\t\"byteOffset\":1387168,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4416,\n\t\t\t\"byteOffset\":1396648,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10224,\n\t\t\t\"byteOffset\":1401064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10224,\n\t\t\t\"byteOffset\":1411288,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4104,\n\t\t\t\"byteOffset\":1421512,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8448,\n\t\t\t\"byteOffset\":1425616,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8448,\n\t\t\t\"byteOffset\":1434064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4224,\n\t\t\t\"byteOffset\":1442512,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10992,\n\t\t\t\"byteOffset\":1446736,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10992,\n\t\t\t\"byteOffset\":1457728,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5064,\n\t\t\t\"byteOffset\":1468720,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1473784,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1473832,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":1473880,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12,\n\t\t\t\"byte
Offset\":1473912,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1473924,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1476996,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1480068,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1480836,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1483908,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1486980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1487028,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":1487076,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1487108,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1487156,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":1487204,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1487236,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1490308,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7668,\n\t\t\t\"byteOffset\":1493380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7668,\n\t\t\t\"byteOffset\":1501048,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5112,\n\t\t\t\"byteOffset\":1508716,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5034,\n\t
\t\t\"byteOffset\":1513828,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1518864,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1519632,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":612,\n\t\t\t\"byteOffset\":1520400,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":864,\n\t\t\t\"byteOffset\":1521012,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":864,\n\t\t\t\"byteOffset\":1521876,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1522740,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":1523508,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":1523652,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":1523796,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1523868,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1526172,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1528476,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22512,\n\t\t\t\"byteOffset\":1530780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22512,\n\t\t\t\"byteOffset\":1553292,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":15008,\n\t\t\t\"byteOffset\":1575804,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":16128,\n\t\t\t\"byteOffset\":1590812,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteL
ength\":3072,\n\t\t\t\"byteOffset\":1606940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1610012,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1613084,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1613612,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1614140,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":168,\n\t\t\t\"byteOffset\":1614492,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1614660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1615188,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1615716,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1616068,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1616596,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1617124,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1617476,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1618004,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1618532,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1618884,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1619412,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\
t\"byteLength\":352,\n\t\t\t\"byteOffset\":1619940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1620292,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1620820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1621348,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1621700,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1622228,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1622756,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1623108,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1623636,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1624164,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1624516,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1625044,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1625572,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1625924,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1626452,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1626980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1627332,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\
n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1627860,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1628388,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1628740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1629268,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1629796,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1630148,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1630676,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1631204,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1631556,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1632084,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1632612,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1632964,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1633492,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1634020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1634372,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1634900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1635428,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer
\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1635780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1636308,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1636836,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1637188,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1637716,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":352,\n\t\t\t\"byteOffset\":1638244,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1638596,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1639748,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1640900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":264,\n\t\t\t\"byteOffset\":1641668,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1641932,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1644236,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":1646540,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1648076,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1648604,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":1649756,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1650908,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\
t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":38148,\n\t\t\t\"byteOffset\":1651676,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":38148,\n\t\t\t\"byteOffset\":1689824,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10320,\n\t\t\t\"byteOffset\":1727972,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1738292,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":1740596,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":1742900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":528,\n\t\t\t\"byteOffset\":1744436,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":1744964,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":1749572,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":1754180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":1757252,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1758308,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":1759076,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":252,\n\t\t\t\"byteOffset\":1759844,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1760096,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":1760144,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":1760192,\n\t\t\t\"target\":34962\
n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":1760480,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":1760768,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7716,\n\t\t\t\"byteOffset\":1760840,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7716,\n\t\t\t\"byteOffset\":1768556,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5144,\n\t\t\t\"byteOffset\":1776272,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5034,\n\t\t\t\"byteOffset\":1781416,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13200,\n\t\t\t\"byteOffset\":1786452,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13200,\n\t\t\t\"byteOffset\":1799652,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13056,\n\t\t\t\"byteOffset\":1812852,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":49776,\n\t\t\t\"byteOffset\":1825908,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":49776,\n\t\t\t\"byteOffset\":1875684,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48768,\n\t\t\t\"byteOffset\":1925460,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":44808,\n\t\t\t\"byteOffset\":1974228,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":44808,\n\t\t\t\"byteOffset\":2019036,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":44544,\n\t\t\t\"byteOffset\":2063844,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7812,\n\t\t\t\"byteOffset\":2108388,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7812,\n\t\t\t\"byteOffset\":2116
200,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5208,\n\t\t\t\"byteOffset\":2124012,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5034,\n\t\t\t\"byteOffset\":2129220,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":15492,\n\t\t\t\"byteOffset\":2134256,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":15492,\n\t\t\t\"byteOffset\":2149748,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10328,\n\t\t\t\"byteOffset\":2165240,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13512,\n\t\t\t\"byteOffset\":2175568,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":2189080,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":2190232,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":2191384,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":2195992,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2200600,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2496,\n\t\t\t\"byteOffset\":2201656,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2496,\n\t\t\t\"byteOffset\":2204152,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":648,\n\t\t\t\"byteOffset\":2206648,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3672,\n\t\t\t\"byteOffset\":2207296,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3672,\n\t\t\t\"byteOffset\":2210968,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1032,\
n\t\t\t\"byteOffset\":2214640,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":2215672,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":2216824,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":2217976,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":264,\n\t\t\t\"byteOffset\":2218744,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":33372,\n\t\t\t\"byteOffset\":2219008,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":33372,\n\t\t\t\"byteOffset\":2252380,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":26880,\n\t\t\t\"byteOffset\":2285752,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":14976,\n\t\t\t\"byteOffset\":2312632,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":14976,\n\t\t\t\"byteOffset\":2327608,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7992,\n\t\t\t\"byteOffset\":2342584,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2736,\n\t\t\t\"byteOffset\":2350576,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2736,\n\t\t\t\"byteOffset\":2353312,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":2356048,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":2356816,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":2356864,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":2356912,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"
byteLength\":48,\n\t\t\t\"byteOffset\":2356944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":2356992,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":2357040,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":2357088,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":2357136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":2357168,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":2360240,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":2363312,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2364080,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2367104,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2880,\n\t\t\t\"byteOffset\":2370128,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2373008,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2373152,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":2373296,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2373368,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2373944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":168,\n\t\t\t\"byteOffset\":2374520,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\
t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2374688,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2375744,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":336,\n\t\t\t\"byteOffset\":2376800,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8184,\n\t\t\t\"byteOffset\":2377136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8184,\n\t\t\t\"byteOffset\":2385320,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":2393504,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13392,\n\t\t\t\"byteOffset\":2395808,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13392,\n\t\t\t\"byteOffset\":2409200,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3600,\n\t\t\t\"byteOffset\":2422592,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34392,\n\t\t\t\"byteOffset\":2426192,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34392,\n\t\t\t\"byteOffset\":2460584,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8832,\n\t\t\t\"byteOffset\":2494976,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2328,\n\t\t\t\"byteOffset\":2503808,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2328,\n\t\t\t\"byteOffset\":2506136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":2508464,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2328,\n\t\t\t\"byteOffset\":2510000,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2328,\n\t\t\t\"byteOffset\":2512328,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{
\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34896,\n\t\t\t\"byteOffset\":2514656,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34896,\n\t\t\t\"byteOffset\":2549552,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8832,\n\t\t\t\"byteOffset\":2584448,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13392,\n\t\t\t\"byteOffset\":2593280,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13392,\n\t\t\t\"byteOffset\":2606672,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8184,\n\t\t\t\"byteOffset\":2620064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8184,\n\t\t\t\"byteOffset\":2628248,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2636432,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2637488,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2638544,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2639120,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2639696,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2639840,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2639984,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2643008,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2646032,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3024,\n\t\t\t\"byteOffset\":2649056,\n\t\t\t\"target
\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2652080,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":2652224,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2652368,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":2652944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2653520,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1056,\n\t\t\t\"byteOffset\":2654576,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8304,\n\t\t\t\"byteOffset\":2655632,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8304,\n\t\t\t\"byteOffset\":2663936,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":2672240,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13104,\n\t\t\t\"byteOffset\":2674544,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":13104,\n\t\t\t\"byteOffset\":2687648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3600,\n\t\t\t\"byteOffset\":2700752,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32652,\n\t\t\t\"byteOffset\":2704352,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32652,\n\t\t\t\"byteOffset\":2737004,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8832,\n\t\t\t\"byteOffset\":2769656,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3456,\n\t\t\t\"byteOffset\":2778488,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3456,\n\t\t\t\"byteOffset\":2
781944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3072,\n\t\t\t\"byteOffset\":2785400,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":18072,\n\t\t\t\"byteOffset\":2788472,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":18072,\n\t\t\t\"byteOffset\":2806544,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":17808,\n\t\t\t\"byteOffset\":2824616,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":24624,\n\t\t\t\"byteOffset\":2842424,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":24624,\n\t\t\t\"byteOffset\":2867048,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":24384,\n\t\t\t\"byteOffset\":2891672,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":26532,\n\t\t\t\"byteOffset\":2916056,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":26532,\n\t\t\t\"byteOffset\":2942588,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":26112,\n\t\t\t\"byteOffset\":2969120,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":18024,\n\t\t\t\"byteOffset\":2995232,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":18024,\n\t\t\t\"byteOffset\":3013256,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":17808,\n\t\t\t\"byteOffset\":3031280,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":49896,\n\t\t\t\"byteOffset\":3049088,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":49896,\n\t\t\t\"byteOffset\":3098984,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48768,\n\t\t\t\"byteOffset\":3148880,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byte
Length\":15840,\n\t\t\t\"byteOffset\":3197648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":15840,\n\t\t\t\"byteOffset\":3213488,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":15660,\n\t\t\t\"byteOffset\":3229328,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"byteOffset\":3244988,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"byteOffset\":3252668,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5120,\n\t\t\t\"byteOffset\":3260348,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5034,\n\t\t\t\"byteOffset\":3265468,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5640,\n\t\t\t\"byteOffset\":3270504,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5640,\n\t\t\t\"byteOffset\":3276144,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3760,\n\t\t\t\"byteOffset\":3281784,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4128,\n\t\t\t\"byteOffset\":3285544,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":3289672,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1152,\n\t\t\t\"byteOffset\":3290824,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":768,\n\t\t\t\"byteOffset\":3291976,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3672,\n\t\t\t\"byteOffset\":3292744,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3672,\n\t\t\t\"byteOffset\":3296416,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1032,\n\t\t\t\"byteOffset\":3300088,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"
buffer\":0,\n\t\t\t\"byteLength\":31824,\n\t\t\t\"byteOffset\":3301120,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":31824,\n\t\t\t\"byteOffset\":3332944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":21216,\n\t\t\t\"byteOffset\":3364768,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":16278,\n\t\t\t\"byteOffset\":3385984,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":3402264,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":576,\n\t\t\t\"byteOffset\":3402840,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":384,\n\t\t\t\"byteOffset\":3403416,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":3403800,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8064,\n\t\t\t\"byteOffset\":3403944,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8064,\n\t\t\t\"byteOffset\":3412008,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"byteOffset\":3420072,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8064,\n\t\t\t\"byteOffset\":3427752,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8064,\n\t\t\t\"byteOffset\":3435816,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":20352,\n\t\t\t\"byteOffset\":3443880,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":20352,\n\t\t\t\"byteOffset\":3464232,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":19968,\n\t\t\t\"byteOffset\":3484584,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2916,\n\t\t\t\"byteOffset\":3504552,\n\t\t\t\"target\":3496
2\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2916,\n\t\t\t\"byteOffset\":3507468,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":3510384,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":43968,\n\t\t\t\"byteOffset\":3512688,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":43968,\n\t\t\t\"byteOffset\":3556656,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":39552,\n\t\t\t\"byteOffset\":3600624,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":55008,\n\t\t\t\"byteOffset\":3640176,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":55008,\n\t\t\t\"byteOffset\":3695184,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":50304,\n\t\t\t\"byteOffset\":3750192,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":24900,\n\t\t\t\"byteOffset\":3800496,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":24900,\n\t\t\t\"byteOffset\":3825396,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":23568,\n\t\t\t\"byteOffset\":3850296,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":36756,\n\t\t\t\"byteOffset\":3873864,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":36756,\n\t\t\t\"byteOffset\":3910620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":35364,\n\t\t\t\"byteOffset\":3947376,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":62460,\n\t\t\t\"byteOffset\":3982740,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":62460,\n\t\t\t\"byteOffset\":4045200,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":55908,\n\t\t\t\"byteOf
fset\":4107660,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4644,\n\t\t\t\"byteOffset\":4163568,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4644,\n\t\t\t\"byteOffset\":4168212,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4320,\n\t\t\t\"byteOffset\":4172856,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":64416,\n\t\t\t\"byteOffset\":4177176,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":64416,\n\t\t\t\"byteOffset\":4241592,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":61056,\n\t\t\t\"byteOffset\":4306008,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9132,\n\t\t\t\"byteOffset\":4367064,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9132,\n\t\t\t\"byteOffset\":4376196,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8748,\n\t\t\t\"byteOffset\":4385328,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":27876,\n\t\t\t\"byteOffset\":4394076,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":27876,\n\t\t\t\"byteOffset\":4421952,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":27300,\n\t\t\t\"byteOffset\":4449828,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28440,\n\t\t\t\"byteOffset\":4477128,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28440,\n\t\t\t\"byteOffset\":4505568,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28416,\n\t\t\t\"byteOffset\":4534008,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":11808,\n\t\t\t\"byteOffset\":4562424,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"b
yteLength\":11808,\n\t\t\t\"byteOffset\":4574232,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":11784,\n\t\t\t\"byteOffset\":4586040,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":35028,\n\t\t\t\"byteOffset\":4597824,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":35028,\n\t\t\t\"byteOffset\":4632852,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34902,\n\t\t\t\"byteOffset\":4667880,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":57996,\n\t\t\t\"byteOffset\":4702784,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":57996,\n\t\t\t\"byteOffset\":4760780,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":57600,\n\t\t\t\"byteOffset\":4818776,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":23412,\n\t\t\t\"byteOffset\":4876376,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":23412,\n\t\t\t\"byteOffset\":4899788,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":21120,\n\t\t\t\"byteOffset\":4923200,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34704,\n\t\t\t\"byteOffset\":4944320,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":34704,\n\t\t\t\"byteOffset\":4979024,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":23136,\n\t\t\t\"byteOffset\":5013728,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":25812,\n\t\t\t\"byteOffset\":5036864,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5496,\n\t\t\t\"byteOffset\":5062676,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5496,\n\t\t\t\"byteOffset\":5068172,\n\t\t\t\"target\":34962\n\t\t},\
n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3664,\n\t\t\t\"byteOffset\":5073668,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":5077332,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":5081940,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":5084244,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":5086548,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":5088084,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5376,\n\t\t\t\"byteOffset\":5089620,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5376,\n\t\t\t\"byteOffset\":5094996,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4608,\n\t\t\t\"byteOffset\":5100372,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6156,\n\t\t\t\"byteOffset\":5104980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6156,\n\t\t\t\"byteOffset\":5111136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5760,\n\t\t\t\"byteOffset\":5117292,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9264,\n\t\t\t\"byteOffset\":5123052,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9264,\n\t\t\t\"byteOffset\":5132316,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9216,\n\t\t\t\"byteOffset\":5141580,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22752,\n\t\t\t\"byteOffset\":5150796,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":22752,\n\t\t\t\"byteOffset\":5173548,\n\t\t\t
\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":19458,\n\t\t\t\"byteOffset\":5196300,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5088,\n\t\t\t\"byteOffset\":5215760,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5088,\n\t\t\t\"byteOffset\":5220848,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4992,\n\t\t\t\"byteOffset\":5225936,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1320,\n\t\t\t\"byteOffset\":5230928,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1320,\n\t\t\t\"byteOffset\":5232248,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1080,\n\t\t\t\"byteOffset\":5233568,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4236,\n\t\t\t\"byteOffset\":5234648,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4236,\n\t\t\t\"byteOffset\":5238884,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4128,\n\t\t\t\"byteOffset\":5243120,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10512,\n\t\t\t\"byteOffset\":5247248,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":10512,\n\t\t\t\"byteOffset\":5257760,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":9504,\n\t\t\t\"byteOffset\":5268272,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":30720,\n\t\t\t\"byteOffset\":5277776,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":30720,\n\t\t\t\"byteOffset\":5308496,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":30720,\n\t\t\t\"byteOffset\":5339216,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"
byteOffset\":5369936,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"byteOffset\":5377616,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7680,\n\t\t\t\"byteOffset\":5385296,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7800,\n\t\t\t\"byteOffset\":5392976,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7800,\n\t\t\t\"byteOffset\":5400776,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7776,\n\t\t\t\"byteOffset\":5408576,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":5416352,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48,\n\t\t\t\"byteOffset\":5416400,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":32,\n\t\t\t\"byteOffset\":5416448,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3600,\n\t\t\t\"byteOffset\":5416480,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3600,\n\t\t\t\"byteOffset\":5420080,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3456,\n\t\t\t\"byteOffset\":5423680,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8400,\n\t\t\t\"byteOffset\":5427136,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8400,\n\t\t\t\"byteOffset\":5435536,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7944,\n\t\t\t\"byteOffset\":5443936,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3804,\n\t\t\t\"byteOffset\":5451880,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3804,\n\t\t\t\"byteOffset\":5455684,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\
":3108,\n\t\t\t\"byteOffset\":5459488,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48960,\n\t\t\t\"byteOffset\":5462596,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48960,\n\t\t\t\"byteOffset\":5511556,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48384,\n\t\t\t\"byteOffset\":5560516,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1392,\n\t\t\t\"byteOffset\":5608900,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1392,\n\t\t\t\"byteOffset\":5610292,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1344,\n\t\t\t\"byteOffset\":5611684,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8412,\n\t\t\t\"byteOffset\":5613028,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":8412,\n\t\t\t\"byteOffset\":5621440,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7944,\n\t\t\t\"byteOffset\":5629852,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12528,\n\t\t\t\"byteOffset\":5637796,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12528,\n\t\t\t\"byteOffset\":5650324,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":12432,\n\t\t\t\"byteOffset\":5662852,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2892,\n\t\t\t\"byteOffset\":5675284,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2892,\n\t\t\t\"byteOffset\":5678176,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2304,\n\t\t\t\"byteOffset\":5681068,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":30264,\n\t\t\t\"byteOffset\":5683372,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"bu
ffer\":0,\n\t\t\t\"byteLength\":30264,\n\t\t\t\"byteOffset\":5713636,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":28032,\n\t\t\t\"byteOffset\":5743900,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":17760,\n\t\t\t\"byteOffset\":5771932,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":17760,\n\t\t\t\"byteOffset\":5789692,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":17424,\n\t\t\t\"byteOffset\":5807452,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":5824876,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":144,\n\t\t\t\"byteOffset\":5825020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":120,\n\t\t\t\"byteOffset\":5825164,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":5825284,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1536,\n\t\t\t\"byteOffset\":5826820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":456,\n\t\t\t\"byteOffset\":5828356,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4332,\n\t\t\t\"byteOffset\":5828812,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":4332,\n\t\t\t\"byteOffset\":5833144,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1032,\n\t\t\t\"byteOffset\":5837476,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6648,\n\t\t\t\"byteOffset\":5838508,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6648,\n\t\t\t\"byteOffset\":5845156,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":1944,\n\t\t\t\"byteOffset\":5851804,\n\t\t\t\"target\":34963\n\
t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":195072,\n\t\t\t\"byteOffset\":5853748,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":195072,\n\t\t\t\"byteOffset\":6048820,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":130048,\n\t\t\t\"byteOffset\":6243892,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":48384,\n\t\t\t\"byteOffset\":6373940,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":6422324,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":7119284,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":464640,\n\t\t\t\"byteOffset\":7816244,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":174240,\n\t\t\t\"byteOffset\":8280884,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":8455124,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":9152084,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":464640,\n\t\t\t\"byteOffset\":9849044,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":174240,\n\t\t\t\"byteOffset\":10313684,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3444,\n\t\t\t\"byteOffset\":10487924,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3444,\n\t\t\t\"byteOffset\":10491368,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2296,\n\t\t\t\"byteOffset\":10494812,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2880,\n\t\t\t\"byteOffset\":10497108,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\
t\t\"byteOffset\":10499988,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":11196948,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":464640,\n\t\t\t\"byteOffset\":11893908,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":12358548,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":696960,\n\t\t\t\"byteOffset\":13055508,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":464640,\n\t\t\t\"byteOffset\":13752468,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3444,\n\t\t\t\"byteOffset\":14217108,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":3444,\n\t\t\t\"byteOffset\":14220552,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":2296,\n\t\t\t\"byteOffset\":14223996,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":14226292,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":7644,\n\t\t\t\"byteOffset\":14233936,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":5096,\n\t\t\t\"byteOffset\":14241580,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6912,\n\t\t\t\"byteOffset\":14246676,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":27648,\n\t\t\t\"byteOffset\":14253588,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":27648,\n\t\t\t\"byteOffset\":14281236,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":18432,\n\t\t\t\"byteOffset\":14308884,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":6912,\n\t\t\t\"byteOffset\":14327316,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\
n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14334228,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14334516,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14334804,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":72,\n\t\t\t\"byteOffset\":14334996,\n\t\t\t\"target\":34963\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14335068,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14335356,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14335644,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14335836,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14336124,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14336412,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14336604,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14336892,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14337180,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14337372,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14337660,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14337948,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14338140,\n\t\t\t\"target\"
:34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14338428,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14338716,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14338908,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14339196,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14339484,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14339676,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14339964,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14340252,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14340444,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14340732,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14341020,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14341212,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":288,\n\t\t\t\"byteOffset\":14341500,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":192,\n\t\t\t\"byteOffset\":14341788,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":195072,\n\t\t\t\"byteOffset\":14341980,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":195072,\n\t\t\t\"byteOffset\":14537052,\n\t\t\t\"target\":34962\n\t\t},\n\t\t{\n\t\t\t\"buffer\":0,\n\t\t\t\"byteLength\":130048,\n\t\t\t\"byteOffs
et\":14732124,\n\t\t\t\"target\":34962\n\t\t}\n\t],\n\t\"samplers\":[\n\t\t{\n\t\t\t\"magFilter\":9729,\n\t\t\t\"minFilter\":9987\n\t\t}\n\t],\n\t\"buffers\":[\n\t\t{\n\t\t\t\"byteLength\":14862172,\n\t\t\t\"uri\":\"the-white-room-low.bin\"\n\t\t}\n\t]\n}\n"
  },
  {
    "path": "src/Compiler/GPUKernel.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompiler.h\"\n#include \"Compiler/GPUKernel.h\"\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n\nextern GPUKernelCompiler g_gpu_kernel_compiler;\nextern ImGuiLogger g_imgui_logger;\n\nconst std::vector<std::string> GPUKernel::COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS =\n{\n\tKERNEL_COMPILER_ADDITIONAL_INCLUDE,\n\tDEVICE_INCLUDES_DIRECTORY,\n\tOROCHI_INCLUDES_DIRECTORY,\n\t\"./\"\n};\n\nGPUKernel::GPUKernel()\n{\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_execution_start_event));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_execution_stop_event));\n}\n\nGPUKernel::GPUKernel(const std::string& kernel_file_path, const std::string& kernel_function_name) : GPUKernel()\n{\n\tm_kernel_file_path = kernel_file_path;\n\tm_kernel_function_name = kernel_function_name;\n}\n\nstd::string GPUKernel::get_kernel_file_path() const\n{\n\treturn m_kernel_file_path;\n}\n\nstd::string GPUKernel::get_kernel_function_name() const\n{\n\treturn m_kernel_function_name;\n}\n\nvoid GPUKernel::set_kernel_file_path(const std::string& kernel_file_path)\n{\n\tm_kernel_file_path = kernel_file_path;\n}\n\nvoid GPUKernel::set_kernel_function_name(const std::string& kernel_function_name)\n{\n\tm_kernel_function_name = kernel_function_name;\n}\n\nvoid GPUKernel::add_additional_macro_for_compilation(const std::string& name, int value)\n{\n\tm_additional_compilation_macros[name] = value;\n}\n\nstd::vector<std::string> GPUKernel::get_additional_compiler_macros() const\n{\n\tstd::vector<std::string> macros;\n\n\tfor (auto macro_key_value : m_additional_compilation_macros)\n\t\tmacros.push_back(\"-D \" + macro_key_value.first + \"=\" + 
std::to_string(macro_key_value.second));\n\n\treturn macros;\n}\n\nvoid GPUKernel::compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, std::vector<hiprtFuncNameSet> func_name_sets, bool use_cache, bool silent)\n{\n\tif (m_option_macro_invalidated)\n\t\tparse_option_macros_used();\n\n\tstd::string cache_key = g_gpu_kernel_compiler.get_additional_cache_key(*this);\n\tm_kernel_function = g_gpu_kernel_compiler.compile_kernel(*this, m_compiler_options, hiprt_ctx,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t func_name_sets.data(), \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t /* num geom */1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t /* num ray */ func_name_sets.size() == 0 ? 0 : 1,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t use_cache, cache_key, silent);\n}\n\nint GPUKernel::get_kernel_attribute(oroFunction compiled_kernel, oroFunction_attribute attribute)\n{\n\tint numRegs = 0;\n\n\tif (compiled_kernel == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to get an attribute of a kernel that wasn't compiled yet.\");\n\n\t\treturn 0;\n\t}\n\n\tOROCHI_CHECK_ERROR(oroFuncGetAttribute(&numRegs, attribute, compiled_kernel));\n\n\treturn numRegs;\n}\n\nint GPUKernel::get_kernel_attribute(oroFunction_attribute attribute) const\n{\n\tint numRegs = 0;\n\n\tif (m_kernel_function == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to get an attribute of a kernel that wasn't compiled yet.\");\n\n\t\treturn 0;\n\t}\n\n\tOROCHI_CHECK_ERROR(oroFuncGetAttribute(&numRegs, attribute, m_kernel_function));\n\n\treturn numRegs;\n}\n\nGPUKernelCompilerOptions& GPUKernel::get_kernel_options()\n{\n\treturn m_compiler_options;\n}\n\nconst GPUKernelCompilerOptions& GPUKernel::get_kernel_options() const\n{\n\treturn m_compiler_options;\n}\n\nvoid GPUKernel::synchronize_options_with(std::shared_ptr<GPUKernelCompilerOptions> other_options, const std::unordered_set<std::string>& options_excluded)\n{\n\tfor (auto macro_to_value : 
other_options->get_options_macro_map())\n\t{\n\t\tconst std::string& macro_name = macro_to_value.first;\n\t\tint macro_value = *macro_to_value.second;\n\n\t\tif (options_excluded.find(macro_name) == options_excluded.end())\n\t\t\t// Option is not excluded\n\t\t\tm_compiler_options.set_pointer_to_macro(macro_name, other_options->get_pointer_to_macro_value(macro_name));\n\t}\n\n\t// Same thing with the custom macros\n\tfor (auto macro_to_value : other_options->get_custom_macro_map())\n\t{\n\t\tconst std::string& macro_name = macro_to_value.first;\n\t\tint macro_value = *macro_to_value.second;\n\n\t\tif (options_excluded.find(macro_name) == options_excluded.end())\n\t\t\t// Option is not excluded\n\t\t\tm_compiler_options.set_pointer_to_macro(macro_name, other_options->get_pointer_to_macro_value(macro_name));\n\t}\n}\n\nvoid GPUKernel::launch(int block_size_x, int block_size_y, int nb_threads_x, int nb_threads_y, void** launch_args, oroStream_t stream)\n{\n\tlaunch_3D(block_size_x, block_size_y, 1, nb_threads_x, nb_threads_y, 1, launch_args, stream);\n}\n\nvoid GPUKernel::launch_3D(int block_size_x, int block_size_y, int block_size_z, int nb_threads_x, int nb_threads_y, int nb_threads_z, void** launch_args, oroStream_t stream)\n{\n\tint3 nb_groups;\n\tnb_groups.x = std::ceil(static_cast<float>(nb_threads_x) / block_size_x);\n\tnb_groups.y = std::ceil(static_cast<float>(nb_threads_y) / block_size_y);\n\tnb_groups.z = std::ceil(static_cast<float>(nb_threads_z) / block_size_z);\n\n\tOROCHI_CHECK_ERROR(oroModuleLaunchKernel(m_kernel_function, nb_groups.x, nb_groups.y, nb_groups.z, block_size_x, block_size_y, block_size_z, 0, stream, launch_args, 0));\n\tm_launched_at_least_once = true;\n}\n\nvoid GPUKernel::launch_synchronous(int block_size_x, int block_size_y, int nb_threads_x, int nb_threads_y, void** launch_args, float* execution_time_out)\n{\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_execution_start_event, 0));\n\n\tlaunch(block_size_x, block_size_y, nb_threads_x, 
nb_threads_y, launch_args, 0);\n\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_execution_stop_event, 0));\n\tOROCHI_CHECK_ERROR(oroEventSynchronize(m_execution_stop_event));\n\tif (execution_time_out != nullptr)\n\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(execution_time_out, m_execution_start_event, m_execution_stop_event));\n}\n\nvoid GPUKernel::parse_option_macros_used()\n{\n\tm_used_option_macros = g_gpu_kernel_compiler.get_option_macros_used_by_kernel(*this);\n\tm_option_macro_invalidated = false;\n}\n\nbool GPUKernel::uses_macro(const std::string& name) const\n{\n\treturn m_used_option_macros.find(name) != m_used_option_macros.end();\n}\n\nfloat GPUKernel::compute_execution_time()\n{\n\tif (!m_launched_at_least_once)\n\t\treturn 0.0f;\n\n\tfloat out;\n\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&out, m_execution_start_event, m_execution_stop_event));\n\n\tm_last_execution_time = out;\n\n\treturn out;\n}\n\nfloat GPUKernel::get_last_execution_time()\n{\n\treturn m_last_execution_time;\n}\n\nbool GPUKernel::has_been_compiled() const\n{\n\treturn m_kernel_function != nullptr;\n}\n\nbool GPUKernel::is_precompiled() const\n{\n\treturn m_is_precompiled_kernel;\n}\n\nvoid GPUKernel::set_precompiled(bool precompiled)\n{\n\tm_is_precompiled_kernel = precompiled;\n}\n\nvoid GPUKernel::launch_asynchronous(int block_size_x, int block_size_y, int nb_threads_x, int nb_threads_y, void** launch_args, oroStream_t stream)\n{\n\tlaunch_asynchronous_3D(block_size_x, block_size_y, 1, nb_threads_x, nb_threads_y, 1, launch_args, stream);\n}\n\nvoid GPUKernel::launch_asynchronous_3D(int block_size_x, int block_size_y, int block_size_z, int nb_threads_x, int nb_threads_y, int nb_threads_z, void** launch_args, oroStream_t stream)\n{\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_execution_start_event, stream));\n\n\tlaunch_3D(block_size_x, block_size_y, block_size_z, nb_threads_x, nb_threads_y, nb_threads_z, launch_args, stream);\n\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_execution_stop_event, 
stream));\n\n\t// TODO: There's an issue here on HIP 5.7 + Windows where without the oroLaunchHostFunc below,\n\t// this oroEventRecord (or any event after a kernel launch) \"blocks\" the stream (only on a non-NULL stream)\n\t// and oroStreamQuery always (kind of) returns hipErrorDeviceNotReady\n\toroLaunchHostFunc(stream, [](void*) {}, nullptr);\n}\n"
  },
  {
    "path": "src/Compiler/GPUKernel.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_KERNEL_H\n#define GPU_KERNEL_H\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n\n#include <hiprt/hiprt.h>\n#include <Orochi/Orochi.h>\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\nclass GPUKernel\n{\npublic:\n\tstatic const std::vector<std::string> COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS;\n\n\tGPUKernel();\n\tGPUKernel(const std::string& kernel_file_path, const std::string& kernel_function_name);\n\n\tstd::string get_kernel_file_path() const;\n\tstd::string get_kernel_function_name() const;\n\n\tvoid set_kernel_file_path(const std::string& kernel_file_path);\n\tvoid set_kernel_function_name(const std::string& kernel_function_name);\n\n\tvoid compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, std::vector<hiprtFuncNameSet> func_name_sets = {}, bool use_cache = true, bool silent = true);\n\t//void compile_silent(std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, std::vector<hiprtFuncNameSet> func_name_sets = {}, bool use_cache = true);\n\tvoid launch_synchronous(int tile_size_x, int tile_size_y, int res_x, int res_y, void** launch_args, float* execution_time_out = nullptr);\n\tvoid launch_asynchronous(int tile_size_x, int tile_size_y, int res_x, int res_y, void** launch_args, oroStream_t stream);\n\tvoid launch_asynchronous_3D(int tile_size_x, int tile_size_y, int tile_size_z, int res_x, int res_y, int res_z, void** launch_args, oroStream_t stream);\n\n\t/**\n\t * Sets an additional macro that will be passed to the GPU compiler when compiling this kernel\n\t */\n\tvoid add_additional_macro_for_compilation(const std::string& name, int value);\n\n\t/**\n\t * Returns a vector of strings of the form { -DMacroName=value, ... 
} from all the additional\n\t * macros that were added to this kernel by calling 'add_additional_macro_for_compilation()'\n\t */\n\tstd::vector<std::string> get_additional_compiler_macros() const;\n\n\t/**\n\t * Reads the kernel file and all of its includes to find what option macros this kernel uses.\n\t * \n\t * Calling this function update the m_used_option_macros member attribute.\n\t */\n\tvoid parse_option_macros_used();\n\n\t/**\n\t * Given an option macro name (\"InteriorStackStrategy\", \"DirectLightSamplingStrategy\", \"EnvmapSamplingStrategy\", ...\n\t * for examples. They are all defined in KernelOptions.h), returns true if the kernel uses that option macro.\n\t * False otherwise.\n\t * \n\t * The kernel \"uses\" that macro if changing the value of that macro and recompiling the kernel\n\t * changes the output of the compiler. For example, the camera ray kernel doesn't care about\n\t * which direct lighting sampling strategy we're using. It also doesn't care about our envmap\n\t * sampling strategy. So we way that the camera ray kernel doesn't use the\n\t * \"DirectLightSamplingStrategy\" and \"EnvmapSamplingStrategy\" options macro\n\t */\n\tbool uses_macro(const std::string& macro_name) const;\n\n\t/**\n\t * Returns the number of GPU register that this kernel is using. This function\n\t * must be called after the kernel has been compiled. 
\n\t * This function may also return 0 if the device doesn't support querrying\n\t * the number of registers\n\t */\n\tint get_kernel_attribute(oroFunction_attribute attribute) const;\n\tstatic int get_kernel_attribute(oroFunction compiled_kernel, oroFunction_attribute attribute);\n\n\t/**\n\t * Returns the compiler options of this kernel so that they can be modified\n\t */\n\tGPUKernelCompilerOptions& get_kernel_options();\n\tconst GPUKernelCompilerOptions& get_kernel_options() const;\n\n\t/**\n\t * Synchronizes the value of the options of this kernel with the values of the macros of 'other_options'.\n\t * This means that if the value of the macro \"MY_MACRO\" is modified in 'other_options', the value of 'MY_MACRO'\n\t * will also be modified in this kernel options.\n\t * \n\t * Macros that are in the 'options_excluded\" set will not be synchronized.\n\t * \n\t * Macros that are present in 'other_options' but that are not present in this kernel's option\n\t * will be added to this kernel and their vlaue will be synchronized with 'other_options'\n\t * \n\t * This function can be useful if you want to have a global set of macros shared by multiple kernels. 
\n\t * You can thus synchronize all your kernel with that global set of macros and when it is modified, \n\t * all the kernels will use the new values.\n\t */\n\tvoid synchronize_options_with(std::shared_ptr<GPUKernelCompilerOptions> other_options, const std::unordered_set<std::string>& options_excluded = {});\n\n\t/**\n\t * Returns the time taken for the last execution of this kernel in milliseconds\n\t */\n\tfloat compute_execution_time();\n\tfloat get_last_execution_time();\n\n\t/**\n\t * Structure used to pass data to the compute_elapsed_time_callback that computes the\n\t * elapsed time between the start and end events of this structure and stores the elapsed\n\t * time in 'elapsed_time_out'\n\t */\n\tstruct ComputeElapsedTimeCallbackData\n\t{\n\t\t// Start and end events to compute the elapsed time between\n\t\toroEvent_t start, end;\n\n\t\t// The elapsed time will be stored in here in milliseconds\n\t\tfloat* elapsed_time_out;\n\n\t\t// Needed to set the CUDA/HIP context as current to be able to call\n\t\t// CUDA/HIP functions from the callback\n\t\tHIPRTOrochiCtx* hiprt_orochi_ctx;\n\t};\n\n\tbool has_been_compiled() const;\n\n\tbool is_precompiled() const;\n\tvoid set_precompiled(bool precompiled);\n\nprivate:\n\tvoid launch(int tile_size_x, int tile_size_y, int res_x, int res_y, void** launch_args, oroStream_t stream);\n\tvoid launch_3D(int tile_size_x, int tile_size_y, int tile_size_z, int res_x, int res_y, int res_z, void** launch_args, oroStream_t stream);\n\n\tstd::string m_kernel_file_path = \"\";\n\tstd::string m_kernel_function_name = \"\";\n\n\t// Whether or not the kernel has been launched at least once\n\t// This is used to avoid CUDA/HIP errors when trying to read\n\t// the stop/start event elapsed time whereas the kernel has never\n\t// been launched\n\tbool m_launched_at_least_once = false;\n\t// GPU events to time the execution time\n\toroEvent_t m_execution_start_event = nullptr;\n\toroEvent_t m_execution_stop_event = nullptr;\n\tfloat 
m_last_execution_time = 0.0f;\n\n\t// Whether or not the macros used by this kernel have been modified recently.\n\t// Only adding new macros / removing macros invalidate the macros.\n\t// Changing the values of macros doesn't invalidate the macros.\n\t// This variable is used to determine whether or not we need to parse the kernel\n\t// source file to collect the macro actually used during the compilation of the kernel\n\tbool m_option_macro_invalidated = true;\n\n\t// Which option macros (as defined in KernelOptions.h) the kernel uses.\n\t// \n\t// See uses_macro() for some examples of what \"use\" means.\n\tstd::unordered_set<std::string> m_used_option_macros;\n\n\t// An additional map of macros to pass to the compiler for this kernel and their values.\n\t//\n\t// Example: { \"ReSTIR_DI_InitialCandidatesKernel\", 1 }\n\tstd::unordered_map<std::string, int> m_additional_compilation_macros;\n\n\t// Options/macros used by the compiler when compiling this kernel\n\tGPUKernelCompilerOptions m_compiler_options;\n\n\toroFunction m_kernel_function = nullptr;\n\n\t// If true, this means that this kernel is only used for precompilation and will be\n\t// discarded after it's been compiled\n\t// This is used in the GPUKernelCompiler to determine whether or not we should increment\n\t// the counter of the ImGuiLoggerLine that counts how many kernels have been precompiled\n\t// so far\n\tbool m_is_precompiled_kernel = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/Compiler/GPUKernelCompiler.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"Compiler/GPUKernelCompiler.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\n#include <chrono>\n#include <condition_variable>\n#include <deque>\n#include <mutex>\n\nGPUKernelCompiler g_gpu_kernel_compiler;\nextern ImGuiLogger g_imgui_logger;\n\nvoid enable_compilation_warnings(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, std::vector<std::string>& compiler_options)\n{\n\tif (std::string(hiprt_orochi_ctx->device_properties.name).find(\"NVIDIA\") == std::string::npos)\n\t{\n\t\t// AMD compiler warning suppressors\n\n\t\tcompiler_options.push_back(\"-Wall\");\n\t\tcompiler_options.push_back(\"-Weverything\");\n\t\tcompiler_options.push_back(\"-Wno-old-style-cast\");\n\t\tcompiler_options.push_back(\"-Wno-reorder-ctor\");\n\t\tcompiler_options.push_back(\"-Wno-c++98-compat\");\n\t\tcompiler_options.push_back(\"-Wno-c++98-compat-pedantic\");\n\t\tcompiler_options.push_back(\"-Wno-reserved-macro-identifier\");\n\t\tcompiler_options.push_back(\"-Wno-extra-semi-stmt\");\n\t\tcompiler_options.push_back(\"-Wno-reserved-identifier\");\n\t\tcompiler_options.push_back(\"-Wno-reserved-identifier\");\n\t\tcompiler_options.push_back(\"-Wno-float-conversion\");\n\t\tcompiler_options.push_back(\"-Wno-implicit-float-conversion\");\n\t\tcompiler_options.push_back(\"-Wno-implicit-int-float-conversion\");\n\t\tcompiler_options.push_back(\"-Wno-deprecated-copy-with-user-provided-copy\");\n\t\tcompiler_options.push_back(\"-Wno-disabled-macro-expansion\");\n\t\tcompiler_options.push_back(\"-Wno-float-equal\");\n\t\tcompiler_options.push_back(\"-Wno-sign-compare\");\n\t\tcompiler_options.push_back(\"-Wno-padded\");\n\t\tcompiler_options.push_back(\"-Wno-sign-conversion\");\n\t\tcompiler_options.push_back(\"-Wno-gnu-ze
ro-variadic-macro-arguments\");\n\t\tcompiler_options.push_back(\"-Wno-missing-variable-declarations\");\n\t}\n}\n\noroFunction_t GPUKernelCompiler::compile_kernel(GPUKernel& kernel, const GPUKernelCompilerOptions& kernel_compiler_options, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, hiprtFuncNameSet* function_name_sets, int num_geom_types, int num_ray_types, bool use_cache, const std::string& additional_cache_key, bool silent)\n{\n\tstd::string kernel_file_path = kernel.get_kernel_file_path();\n\tstd::string kernel_function_name = kernel.get_kernel_function_name();\n\tconst std::vector<std::string>& additional_include_dirs = GPUKernel::COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS;\n\tstd::vector<std::string> compiler_options = kernel_compiler_options.get_relevant_macros_as_std_vector_string(&kernel);\n\n#ifndef OROCHI_ENABLE_CUEW\n\tcompiler_options.push_back(\"-g\");\n\tcompiler_options.push_back(\"-ggdb\");\n#endif\n\t\n\t// enable_compilation_warnings(hiprt_orochi_ctx, compiler_options);\n\t// compiler_options.push_back(\"-g\");\n\t// compiler_options.push_back(\"-ggdb\");\n\n\t// Locking because neither NVIDIA or AMD can compile kernels on multiple threads at the same time\n\t// with their runtime API (but NVCC/HIPCC can compile in parallel with the commandline) so we may as well\n\t// lock here to have better control on when to compile a kernel as well as have proper compilation times\n\tstd::unique_lock<std::mutex> lock(m_compile_mutex);\n\n\tauto start = std::chrono::high_resolution_clock::now();\n\n\thiprtApiFunction trace_function_out;\n\tbool use_shader_cache;\n\tif (m_shader_cache_force_usage == GPUKernelCompiler::ShaderCacheUsageOverride::FORCE_SHADER_CACHE_OFF)\n\t\tuse_shader_cache = false;\n\telse if (m_shader_cache_force_usage == GPUKernelCompiler::ShaderCacheUsageOverride::FORCE_SHADER_CACHE_ON)\n\t\tuse_shader_cache = true;\n\telse\n\t\tuse_shader_cache = use_cache;\n\n\thiprtError compile_status = 
HIPPTOrochiUtils::build_trace_kernel(hiprt_orochi_ctx->hiprt_ctx, kernel_file_path, kernel_function_name, trace_function_out, additional_include_dirs, compiler_options, num_geom_types, num_ray_types, use_shader_cache, function_name_sets, additional_cache_key);\n\tif (compile_status != hiprtError::hiprtSuccess)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Unable to compile kernel \\\"%s\\\". Cannot continue.\", kernel_function_name.c_str());\n\n\t\treturn nullptr;\n\t}\n\n\toroFunction kernel_function = reinterpret_cast<oroFunction>(trace_function_out);\n\n\tif (kernel.is_precompiled())\n\t{\n\t\t// Updating the logs\n\t\tm_precompiled_kernels_compilation_ended++;\n\n\t\tg_imgui_logger.update_line(ImGuiLogger::BACKGROUND_KERNEL_COMPILATION_LINE_NAME, \"Compiling kernel permutations in the background... [%d / %d]\", m_precompiled_kernels_compilation_ended.load(), m_precompiled_kernels_parsing_started.load());\n\t}\n\n\tauto stop = std::chrono::high_resolution_clock::now();\n\n\tif (!silent)\n\t{\n\t\t// Setting the current context is necessary because getting\n\t\t// functions attributes necessitates calling CUDA/HIP functions\n\t\t// which need their context to be current if not calling from\n\t\t// the main thread (which we are not if we are compiling kernels on multithreads)\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(hiprt_orochi_ctx->orochi_ctx));\n\n\t\tint nb_reg = GPUKernel::get_kernel_attribute(kernel_function, ORO_FUNC_ATTRIBUTE_NUM_REGS);\n\t\tint nb_shared = GPUKernel::get_kernel_attribute(kernel_function, ORO_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES);\n\t\tint nb_local = GPUKernel::get_kernel_attribute(kernel_function, ORO_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES);\n\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Kernel \\\"%s\\\" compiled in %ldms.\\n\\t[Reg, Shared, Local] = [%d, %d, %d]\\n\", kernel_function_name.c_str(), std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count(), nb_reg, nb_shared, 
nb_local);\n\t}\n\n\treturn kernel_function;\n}\n\nstd::string GPUKernelCompiler::find_in_include_directories(const std::string& include_name, const std::vector<std::string>& include_directories)\n{\n\tfor (const std::string& include_directory : include_directories)\n\t{\n\t\tstd::string add_slash = include_directory[include_directory.length() - 1] != '/' ? \"/\" : \"\";\n\t\tstd::string file_path = include_directory + add_slash + include_name;\n\t\tstd::ifstream try_open_file(file_path);\n\t\tif (try_open_file.is_open())\n\t\t\treturn file_path;\n\t}\n\n\treturn \"\";\n}\n\nvoid GPUKernelCompiler::read_includes_of_file(const std::string& include_file_path, const std::vector<std::string>& include_directories, std::unordered_set<std::string>& output_includes)\n{\n\tstd::ifstream include_file(include_file_path);\n\tif (include_file.is_open())\n\t{\n\t\tstd::string line;\n\t\twhile (std::getline(include_file, line))\n\t\t{\n\t\t\tif (line.starts_with(\"#include \"))\n\t\t\t{\n\t\t\t\tsize_t find_start = line.find('<');\n\t\t\t\tif (find_start == std::string::npos)\n\t\t\t\t{\n\t\t\t\t\t// Trying to find a quote instead\n\t\t\t\t\tfind_start = line.find('\"');\n\t\t\t\t\tif (find_start == std::string::npos)\n\t\t\t\t\t\t// Couldn't find a quote either, ill-formed include\n\t\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tsize_t find_end = line.rfind('>');\n\t\t\t\tif (find_end == std::string::npos)\n\t\t\t\t{\n\t\t\t\t\t// Trying to find a quote instead\n\t\t\t\t\tfind_end = line.rfind('\"');\n\t\t\t\t\tif (find_end == std::string::npos)\n\t\t\t\t\t\t// Couldn't find a quote either, ill-formed include\n\t\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// We found the include string, now we're going to check whether it can be found\n\t\t\t\t// in the given includes directories (which contain the only includes that we're\n\t\t\t\t// interested in)\n\n\t\t\t\t// Include file with leading Device/includes/... 
or whatever folder the include may come from\n\t\t\t\tstd::string full_include_name = line.substr(find_start + 1, find_end - find_start - 1);\n\n\t\t\t\t// We have only the file name (which looks like \"MyInclude.h\" for example), let's see\n\t\t\t\t// if it can be found in the include directories\n\t\t\t\tstd::string include_file_path = find_in_include_directories(full_include_name, include_directories);\n\n\t\t\t\tif (!include_file_path.empty())\n\t\t\t\t\t// Adding to file path that can directly be opened in an std::ifstream\n\t\t\t\t\toutput_includes.insert(include_file_path);\n\t\t\t}\n\t\t\telse\n\t\t\t\tcontinue;\n\t\t}\n\n\t}\n\telse\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Could not generate additional cache key for kernel with path \\\"%s\\\": %s\", include_file_path.c_str(), strerror(errno));\n\n\t\tUtils::debugbreak();\n\t}\n}\n\nstd::unordered_set<std::string> GPUKernelCompiler::read_option_macro_of_file(const std::string& filepath)\n{\n\tstd::string file_modification_time;\n\n\ttry\n\t{\n\t\tstd::chrono::time_point modification_time = std::filesystem::last_write_time(filepath);\n\n\t\tfile_modification_time = std::to_string(modification_time.time_since_epoch().count());\n\t}\n\tcatch (std::filesystem::filesystem_error e)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"HIPKernelCompiler - Unable to open include file \\\"%s\\\" for option macros analyzing: %s\", filepath.c_str(), e.what());\n\n\t\treturn std::unordered_set<std::string>();\n\t}\n\n\t{\n\t\t// We don't to read into the cache while someone may be writing to it (at the end of this function)\n\t\t// so we lock\n\t\tstd::lock_guard<std::mutex> lock(m_option_macro_cache_mutex);\n\n\t\tauto cache_timestamp_find = m_filepath_to_options_macros_cache_timestamp.find(filepath);\n\t\tif (cache_timestamp_find != m_filepath_to_options_macros_cache_timestamp.end() && cache_timestamp_find->second == file_modification_time)\n\t\t{\n\t\t\t// 
Cache hit\n\t\t\treturn m_filepath_to_option_macros_cache[filepath];\n\t\t}\n\t}\n\n\tstd::unordered_set<std::string> option_macros;\n\tstd::ifstream include_file(filepath);\n\tif (include_file.is_open())\n\t{\n\t\tstd::string line;\n\t\twhile (std::getline(include_file, line))\n\t\t\tfor (const std::string& existing_macro_option : GPUKernelCompilerOptions::ALL_MACROS_NAMES)\n\t\t\t\tif (line.find(existing_macro_option) != std::string::npos)\n\t\t\t\t\toption_macros.insert(existing_macro_option);\n\n\t}\n\telse\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Could not open file \\\"%s\\\" for reading option macros used by that file: %s\", filepath.c_str(), strerror(errno));\n\n\t// The cache is shared to all threads using this GPUKernelCompiler so we're locking that operation\n\t// The lock is destroyed when the function returns\n\tstd::lock_guard<std::mutex> lock(m_option_macro_cache_mutex);\n\n\t// Updating the cache\n\tm_filepath_to_option_macros_cache[filepath] = option_macros;\n\tm_filepath_to_options_macros_cache_timestamp[filepath] = file_modification_time;\n\n\treturn option_macros;\n}\n\nstd::string GPUKernelCompiler::get_additional_cache_key(GPUKernel& kernel)\n{\n\tm_additional_cache_key_started++;\n\n\tstd::unordered_set<std::string> already_processed_includes;\n\tstd::deque<std::string> yet_to_process_includes;\n\tyet_to_process_includes.push_back(kernel.get_kernel_file_path());\n\n\twhile (!yet_to_process_includes.empty())\n\t{\n\t\tstd::string current_file = yet_to_process_includes.front();\n\t\tyet_to_process_includes.pop_front();\n\n\t\tif (already_processed_includes.find(current_file) != already_processed_includes.end())\n\t\t\t// We've already processed that file\n\t\t\tcontinue;\n\n\t\talready_processed_includes.insert(current_file);\n\n\t\tstd::unordered_set<std::string> new_includes;\n\t\tread_includes_of_file(current_file, GPUKernel::COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS, new_includes);\n\n\t\tfor (const std::string& 
new_include : new_includes)\n\t\t\tyet_to_process_includes.push_back(new_include);\n\t}\n\n\t// The cache key is going to be the concatenation of the last modified times of all the includes\n\t// that the kernel file we just parsed depends on. That way, if any dependency of this kernel has\n\t// been modified, the cache key will be different and the cache will be invalidated.\n\tstd::string final_cache_key = \"\";\n\tfor (const std::string& include : already_processed_includes)\n\t{\n\t\t// TODO this exception here should probably go up a level so that we can know that the kernel compilation failed --> set the kernel function to nullptr --> do try to launch the kernel (otherwise this will probably crash the driver)\n\t\ttry\n\t\t{\n\t\t\tstd::chrono::time_point modification_time = std::filesystem::last_write_time(include);\n\n\t\t\tfinal_cache_key += std::to_string(modification_time.time_since_epoch().count());\n\t\t}\n\t\tcatch (std::filesystem::filesystem_error e)\n\t\t{\n\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"HIPKernelCompiler - Unable to open include file \\\"%s\\\" for shader cache validation: %s\", include.c_str(), e.what());\n\n\t\t\tm_additional_cache_key_ended++;\n\t\t\t// Notifying the condition variable that's used to\n\t\t\t// avoid exiting the application with ongoing IO operations\n\t\t\tm_read_macros_cv.notify_all();\n\n\t\t\treturn \"\";\n\t\t}\n\t}\n\n\tm_additional_cache_key_ended++;\n\t// Notifying the condition variable that's used to\n\t// avoid exiting the application with ongoing IO operations\n\tm_read_macros_cv.notify_all();\n\n\treturn final_cache_key;\n}\n\nstd::unordered_set<std::string> GPUKernelCompiler::get_option_macros_used_by_kernel(const GPUKernel& kernel)\n{\n\tif (kernel.is_precompiled())\n\t\t// If this kernel is being precompiled, we can increment the counter\n\t\t// used for logging\n\t\tm_precompiled_kernels_parsing_started++;\n\n\t// Limiting the number of threads that can get in here at 
the same time otherwise we may\n\t// get some \"Too many files open!\" error\n\tm_read_macros_semaphore.acquire();\n\n\tstd::unordered_set<std::string> already_processed_includes;\n\tstd::deque<std::string> yet_to_process_includes;\n\tyet_to_process_includes.push_back(kernel.get_kernel_file_path());\n\n\twhile (!yet_to_process_includes.empty())\n\t{\n\t\tstd::string current_file = yet_to_process_includes.front();\n\t\tyet_to_process_includes.pop_front();\n\n\t\tif (already_processed_includes.find(current_file) != already_processed_includes.end())\n\t\t\t// We've already processed that file\n\t\t\tcontinue;\n\t\telse if (current_file.find(\"HostDeviceCommon/KernelOptions\") != std::string::npos)\n\t\t\t// Ignoring kernel options files when looking for option macros\n\t\t\tcontinue;\n\t\telse if (current_file.find(\"Device/\") == std::string::npos && current_file.find(\"HostDeviceCommon/\") == std::string::npos)\n\t\t\t// Excluding files that are not in the Device/ or HostDeviceCommon/ folder because we're only\n\t\t\t// interested in kernel files, not CPU C++ files\n\t\t\tcontinue;\n\n\t\talready_processed_includes.insert(current_file);\n\n\t\tstd::unordered_set<std::string> new_includes;\n\t\tread_includes_of_file(current_file, GPUKernel::COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS, new_includes);\n\n\t\tfor (const std::string& new_include : new_includes)\n\t\t\tyet_to_process_includes.push_back(new_include);\n\t}\n\n\tstd::unordered_set<std::string> option_macro_names;\n\tfor (const std::string& include : already_processed_includes)\n\t{\n\t\tstd::unordered_set<std::string> include_option_macros = read_option_macro_of_file(include);\n\n\t\tfor (const std::string& option_macro : include_option_macros)\n\t\t\toption_macro_names.insert(option_macro);\n\t}\n\n\tm_read_macros_semaphore.release();\n\tm_read_macros_cv.notify_all();\n\n\tif (kernel.is_precompiled())\n\t{\n\t\t// If this kernel is being precompiled, we can increment the counter\n\t\t// used for 
logging\n\t\tm_precompiled_kernels_parsing_ended++;\n\n\t\t// And update the log line\n\t\tg_imgui_logger.update_line(ImGuiLogger::BACKGROUND_KERNEL_PARSING_LINE_NAME, \"Parsing kernel permutations in the background... [%d / %d]\", m_precompiled_kernels_parsing_ended.load(), m_precompiled_kernels_parsing_started.load());\n\t}\n\n\n\treturn option_macro_names;\n}\n\nvoid GPUKernelCompiler::wait_compiler_file_operations()\n{\n\tstd::mutex mutex;\n\tstd::unique_lock<std::mutex> lock(mutex);\n\n\tm_read_macros_cv.wait(lock, [this]() { return m_precompiled_kernels_parsing_started == m_precompiled_kernels_parsing_ended; });\n\tm_read_macros_cv.wait(lock, [this]() { return m_additional_cache_key_started == m_additional_cache_key_ended; });\n}\n\nGPUKernelCompiler::ShaderCacheUsageOverride GPUKernelCompiler::get_shader_cache_usage_override() const\n{\n\treturn m_shader_cache_force_usage;\n}\n\nvoid GPUKernelCompiler::set_shader_cache_usage_override(GPUKernelCompiler::ShaderCacheUsageOverride override_usage)\n{\n\tm_shader_cache_force_usage = override_usage;\n}\n\n"
  },
  {
    "path": "src/Compiler/GPUKernelCompiler.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_KERNEL_COMPILER_H\n#define GPU_KERNEL_COMPILER_H\n\n#include \"Compiler/GPUKernel.h\"\n\n#include <mutex>\n#include <semaphore>\n#include <unordered_map>\n#include <unordered_set>\n#include <condition_variable>\n\n\nclass GPUKernelCompiler\n{\npublic:\n\tenum ShaderCacheUsageOverride\n\t{\n\t\tFORCE_SHADER_CACHE_DEFAULT,\n\t\tFORCE_SHADER_CACHE_OFF,\n\t\tFORCE_SHADER_CACHE_ON,\n\t};\n\n\toroFunction_t compile_kernel(GPUKernel& kernel, const GPUKernelCompilerOptions& kernel_compiler_options, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, hiprtFuncNameSet* function_name_sets, int num_geom_types, int num_ray_types, bool use_cache, const std::string& additional_cache_key, bool silent = false);\n\n\t/**\n\t * Takes an include name (\"Device/includes/MyInclude.h\" for example) and a list of include directories.\n\t * If the given include can be found in one the given include directories, the concatenation of the\n\t * include directory with the include name is returned\n\t *\n\t * If it cannot be found, the empty string is returned\n\t */\n\tstd::string find_in_include_directories(const std::string& include_name, const std::vector<std::string>& include_directories);\n\n\t/**\n\t * Reads the given file (include_file_path) and fills the 'output_includes' parameters with the includes (#include \"XXX\" pr #include <XXX>) \n\t * used by that file.\n\t * \n\t * Only includes that can be found in the given 'include_directories' will be added to the output parameter, others will be\n\t * ignored.\n\t */\n\tvoid read_includes_of_file(const std::string& include_file_path, const std::vector<std::string>& include_directories, std::unordered_set<std::string>& output_includes);\n\n\t/**\n\t * Returns a list of the option macro used in the given file.\n\t * \"Used\" means that the option macro is used in #if, #ifdef or 
equivalent directives\n\t */\n\tstd::unordered_set<std::string> read_option_macro_of_file(const std::string& filepath);\n\n\t/**\n\t * Returns a string that consists of the concatenation of the include dependencies of the given kernel.\n\t * For example, if the given kernel has includes \"Include1.h\" and \"Include2.h\" and that Include2.h itself\n\t contains \"Include3.h\", the returned string will be the concatenation of the last modification time\n\t (on the hard drive) of these 3 files, which may look something like this:\n\t *\n\t * \"133378594621\" + \"13334848655\" + \"1331849841\" = \"133378594621133348486551331849841\".\n\t *\n\t * Note that the timestamps are \"time since epoch\" so that's why they're pretty unintelligible.\n\t *\n\t * The returned string, so-called \"additional cache key\", can be used to determine whether or not a GPU\n\t * shader needs to be recompiled or not by passing it to the HIPRT compiler which will take it into account\n\t * into the hash used to determine whether a file is up to date or not.\n\t *\n\t * Note that only includes that can be found in the additional include directories of the given kernel\n\t * (the parameter of this function) are going to be considered for the concatenation of time stamps. Includes\n\t * that cannot be found in the kernel's include directories are ignored (this prevents the issue of losing\n\t * ourselves in the parsing of stdlib headers for example. 
stdlib headers will be ignored since they are not\n\t * [probably] in the include directories of the kernel).\n\t */\n\tstd::string get_additional_cache_key(GPUKernel& kernel);\n\n\t/**\n\t * Returns a list of the option macro names used by the given kernel.\n\t * \n\t * For example, this function will return {\"DirectLightSamplingStrategy\", \"EnvmapSamplingStrategy\"}\n\t * if the given kernel uses this two macros (if the kernel has some \"#if == DirectLightSamplingStrategy\", \"#ifdef DirectLightSamplingStrategy\"\n\t * directives or similar in its code)\n\t */\n\tstd::unordered_set<std::string> get_option_macros_used_by_kernel(const GPUKernel& kernel);\n\n\t/**\n\t * For background kernel precompilation, threads compiling kernels actually open the kernel\n\t * files on the disk to read the macros used by the kernels.\n\t * \n\t * If the application is closed while these files are being opened, this can SEGFAULT.\n\t * \n\t * This function blocks the calling thread until all threads have parsed the kernel files and\n\t * thus avoids a crash.\n\t * \n\t * This function is mostly called when exiting the Renderwindow\n\t */\n\tvoid wait_compiler_file_operations();\n\n\t/**\n\t * Possible values for `override_usage`:\n\t * \n\t *\t- GPUKernelCompiler::ShaderCacheUsageOverride::FORCE_SHADER_CACHE_OFF\t\t\t\n\t *\t\t--> The shader compiler will be forced *not* to use the shader cache\n\t *\t- GPUKernelCompiler::ShaderCacheUsageOverride::FORCE_SHADER_CACHE_ON      \n\t *\t\t--> The shader compiler will be forced to use the shader cache\n\t *\t- GPUKernelCompiler::ShaderCacheUsageOverride::FORCE_SHADER_CACHE_DEFAULT \n\t *\t\t--> The shader compiler will use whatever 'use_cache' parameter is passed to 'GPUKernelCompiler::compile_kernel()'\n\t */\n\tvoid set_shader_cache_usage_override(ShaderCacheUsageOverride override_usage);\n\n\tShaderCacheUsageOverride get_shader_cache_usage_override() const;\n\nprivate:\n\t// Cache that maps a filepath to the option macros that it 
contains.\n\t// This saves us having to reparse the file to find the options macros\n\t// if the file was already parsed for another kernel by this GPUKernelCompiler\n\tstd::unordered_map<std::string, std::unordered_set<std::string>> m_filepath_to_option_macros_cache;\n\t// Maps filepath to the last modification time of the file pointed by the filepath. \n\t// Useful to invalidate the cache if the file was modified (meaning that the option\n\t// macros used by that file may have changed so we have to reparse the file)\n\tstd::unordered_map<std::string, std::string> m_filepath_to_options_macros_cache_timestamp;\n\n\t// Because this GPUKernelCompiler may be used by multiple threads at the same time,\n\t// we may use that mutex sometimes to protect from race conditions\n\tstd::mutex m_option_macro_cache_mutex;\n\tstd::mutex m_compile_mutex;\n\n\t// Semaphore used by 'get_option_macros_used_by_kernel' so that not too many threads\n\t// read kernel files at the same time: this can cause a \"Too many files open\" error\n\t// \n\t// Limiting to a number of maximum threads at a time\n\tstd::counting_semaphore<> m_read_macros_semaphore { 1 };\n\tstd::condition_variable m_read_macros_cv;\n\n\t// Counters for logging the progress of background kernel precompilation\n\t// These variables are also used for making sure that all threads have completed\n\t// their IO operations before exiting the app\n\tstd::atomic<int> m_precompiled_kernels_parsing_started = 0;\n\tstd::atomic<int> m_precompiled_kernels_parsing_ended = 0;\n\tstd::atomic<int> m_additional_cache_key_started = 0;\n\tstd::atomic<int> m_additional_cache_key_ended = 0;\n\tstd::atomic<int> m_precompiled_kernels_compilation_ended = 0;\n\n\tShaderCacheUsageOverride m_shader_cache_force_usage = ShaderCacheUsageOverride::FORCE_SHADER_CACHE_DEFAULT;\n};\n\n#endif\n"
  },
  {
    "path": "src/Compiler/GPUKernelCompilerOptions.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernel.h\"\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"Utils/Utils.h\"\n\n#include <cassert>\n\n/**\n * Defining the strings that go with the option so that they can be passed to the shader compiler\n * with the -D<string>=<value> option.\n * \n * The strings used here must match the ones used in KernelOptions.h\n */\nconst std::string GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL = \"UseSharedStackBVHTraversal\";\nconst std::string GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE = \"SharedStackBVHTraversalSize\";\nconst std::string GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_BLOCK_SIZE = \"KernelWorkgroupThreadCount\";\nconst std::string GPUKernelCompilerOptions::DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE = \"DoFirstBounceWarpDirectionReuse\";\nconst std::string GPUKernelCompilerOptions::DISPLAY_ONLY_SAMPLE_N = \"DisplayOnlySampleN\";\n\nconst std::string GPUKernelCompilerOptions::BSDF_OVERRIDE = \"BSDFOverride\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DIFFUSE_LOBE = \"PrincipledBSDFDiffuseLobe\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION = \"PrincipledBSDFDoEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION = \"PrincipledBSDFDoGlassEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION = \"PrincipledBSDFDoClearcoatEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION = \"PrincipledBSDFDoMetallicEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION = 
\"PrincipledBSDFDoMetallicFresnelEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION = \"PrincipledBSDFDoSpecularEnergyCompensation\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION = \"PrincipledBSDFDeltaDistributionEvaluationOptimization\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL = \"PrincipledBSDFSampleGlossyBasedOnFresnel\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL = \"PrincipledBSDFSampleCoatBasedOnFresnel\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION = \"PrincipledBSDFDoMicrofacetRegularization\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION = \"PrincipledBSDFDoMicrofacetRegularizationConsistentParameterization\";\nconst std::string GPUKernelCompilerOptions::PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC= \"PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic\";\nconst std::string GPUKernelCompilerOptions::GGX_SAMPLE_FUNCTION = \"PrincipledBSDFAnisotropicGGXSampleFunction\";\nconst std::string GPUKernelCompilerOptions::NESTED_DIELETRCICS_STACK_SIZE_OPTION = \"NestedDielectricsStackSize\";\n\nconst std::string GPUKernelCompilerOptions::TRIANGLE_POINT_SAMPLING_STRATEGY = \"TrianglePointSamplingStrategy\";\n\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_DO_LIGHT_PRESAMPLING = \"ReGIR_GridFillDoLightPresampling\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY = \"ReGIR_GridFillLightSamplingBaseStrategy\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_VISIBILITY = \"ReGIR_GridFillTargetFunctionVisibility\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM = 
\"ReGIR_GridFillTargetFunctionCosineTerm\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM_LIGHT_SOURCE = \"ReGIR_GridFillTargetFunctionCosineTermLightSource\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_PRIMARY_HITS_TARGET_FUNCTION_BSDF = \"ReGIR_GridFillPrimaryHitsTargetFunctionBSDF\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_SECONDARY_HITS_TARGET_FUNCTION_BSDF = \"ReGIR_GridFillSecondaryHitsTargetFunctionBSDF\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION = \"ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation\";\nconst std::string GPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION = \"ReGIR_GridFillSpatialReuse_AccumulatePreIntegration\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_VISIBILITY = \"ReGIR_ShadingResamplingTargetFunctionVisibility\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY = \"ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESMAPLING_JITTER_CANONICAL_CANDIDATES = \"ReGIR_ShadingResamplingJitterCanonicalCandidates\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_BSDF_MIS = \"ReGIR_ShadingResamplingDoBSDFMIS\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_MIS_PAIRWISE_MIS = \"ReGIR_ShadingResamplingDoMISPairwiseMIS\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_SHADE_ALL_SAMPLES = \"ReGIR_ShadingResamplingShadeAllSamples\";\nconst std::string GPUKernelCompilerOptions::REGIR_FALLBACK_LIGHT_SAMPLING_STRATEGY = \"ReGIR_FallbackLightSamplingStrategy\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE = \"ReGIR_HashGridCollisionResolutionMode\";\nconst std::string 
GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS = \"ReGIR_HashGridCollisionResolutionMaxSteps\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL = \"ReGIR_HashGridHashSurfaceNormal\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS = \"ReGIR_HashGridHashSurfaceNormalResolutionPrimaryHits\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS = \"ReGIR_HashGridHashSurfaceNormalResolutionSecondaryHits\";\nconst std::string GPUKernelCompilerOptions::REGIR_SHADING_JITTER_TRIES = \"ReGIR_ShadingJitterTries\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_ADAPTIVE_ROUGHNESS_GRID_PRECISION = \"ReGIR_HashGridAdaptiveRoughnessGridPrecision\";\nconst std::string GPUKernelCompilerOptions::REGIR_HASH_GRID_CONSTANT_GRID_CELL_SIZE = \"ReGIR_HashGridConstantGridCellSize\";\nconst std::string GPUKernelCompilerOptions::REGIR_DEBUG_MODE = \"ReGIR_DebugMode\";\n\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY = \"DirectLightSamplingStrategy\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY = \"DirectLightSamplingBaseStrategy\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT = \"DirectLightSamplingNEESampleCount\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS = \"DirectLightUseNEEPlusPlus\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE = \"DirectLightUseNEEPlusPlusRR\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED = \"DirectLightNEEPlusPlusDisplayShadowRaysDiscarded\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED_BOUNCE = \"DirectLightNEEPlusPlusDisplayShadowRaysDiscardedBounce\";\nconst std::string 
GPUKernelCompilerOptions::NEE_PLUS_PLUS_LINEAR_PROBING_STEPS = \"NEEPlusPlus_LinearProbingSteps\";\nconst std::string GPUKernelCompilerOptions::NEE_PLUS_PLUS_DEBUG_MODE = \"NEEPlusPlusDebugMode\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION = \"DirectLightSamplingDeltaDistributionOptimization\";\nconst std::string GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS = \"DirectLightSamplingAllowBackfacingLights\";\nconst std::string GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION = \"RISUseVisiblityTargetFunction\";\nconst std::string GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY = \"EnvmapSamplingStrategy\";\nconst std::string GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BSDF_MIS = \"EnvmapSamplingDoBSDFMIS\";\nconst std::string GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BILINEAR_FILTERING = \"EnvmapSamplingDoBilinearFiltering\";\n\nconst std::string GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY = \"PathSamplingStrategy\";\n\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY = \"ReSTIR_DI_InitialTargetFunctionVisibility\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY = \"ReSTIR_DI_SpatialTargetFunctionVisibility\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE = \"ReSTIR_DI_DoVisibilityReuse\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY = \"ReSTIR_DI_BiasCorrectionUseVisibility\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS = \"ReSTIR_DI_BiasCorrectionWeights\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY = \"ReSTIR_DI_LaterBouncesSamplingStrategy\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING = \"ReSTIR_DI_DoLightPresampling\";\nconst std::string 
GPUKernelCompilerOptions::RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY = \"ReSTIR_DI_LightPresamplingStrategy\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT = \"ReSTIR_DI_SpatialDirectionalReuseBitCount\";\nconst std::string GPUKernelCompilerOptions::RESTIR_DI_DO_OPTIMAL_VISIBILITY_SAMPLING = \"ReSTIR_DI_DoOptimalVisibilitySampling\";\n\nconst std::string GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY = \"ReSTIR_GI_SpatialTargetFunctionVisibility\";\nconst std::string GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT = \"ReSTIR_GI_SpatialDirectionalReuseBitCount\";\nconst std::string GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY = \"ReSTIR_GI_BiasCorrectionUseVisibility\";\nconst std::string GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS = \"ReSTIR_GI_BiasCorrectionWeights\";\nconst std::string GPUKernelCompilerOptions::RESTIR_GI_DO_OPTIMAL_VISIBILITY_SAMPLING = \"ReSTIR_GI_DoOptimalVisibilitySampling\";\n\nconst std::string GPUKernelCompilerOptions::GMON_M_SETS_COUNT = \"GMoNMSetsCount\";\n\nconst std::unordered_set<std::string> GPUKernelCompilerOptions::ALL_MACROS_NAMES = 
{\n\tGPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL,\n\tGPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE,\n\tGPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_BLOCK_SIZE,\n\tGPUKernelCompilerOptions::DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE,\n\tGPUKernelCompilerOptions::DISPLAY_ONLY_SAMPLE_N,\n\n\tGPUKernelCompilerOptions::BSDF_OVERRIDE,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DIFFUSE_LOBE,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION,\n\tGPUKernelCompilerOptions::PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC 
,\n\tGPUKernelCompilerOptions::GGX_SAMPLE_FUNCTION,\n\tGPUKernelCompilerOptions::NESTED_DIELETRCICS_STACK_SIZE_OPTION,\n\n\tGPUKernelCompilerOptions::TRIANGLE_POINT_SAMPLING_STRATEGY,\n\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_DO_LIGHT_PRESAMPLING,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_VISIBILITY,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM_LIGHT_SOURCE,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_PRIMARY_HITS_TARGET_FUNCTION_BSDF,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_SECONDARY_HITS_TARGET_FUNCTION_BSDF,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION,\n\tGPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_VISIBILITY,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESMAPLING_JITTER_CANONICAL_CANDIDATES,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_BSDF_MIS,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_MIS_PAIRWISE_MIS,\n\tGPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_SHADE_ALL_SAMPLES,\n\tGPUKernelCompilerOptions::REGIR_FALLBACK_LIGHT_SAMPLING_STRATEGY,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS,\n\tGPUKernelCompilerOptions::REGIR_SHADING_JITTER_TRIES,\n\tGPUKernelCompilerOptions::REGIR_HASH_GRID_ADAPTIVE_ROUGHNESS_GRID_PRECISION,\n\tGPUKernel
CompilerOptions::REGIR_HASH_GRID_CONSTANT_GRID_CELL_SIZE,\n\tGPUKernelCompilerOptions::REGIR_DEBUG_MODE,\n\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED_BOUNCE,\n\tGPUKernelCompilerOptions::NEE_PLUS_PLUS_LINEAR_PROBING_STEPS,\n\tGPUKernelCompilerOptions::NEE_PLUS_PLUS_DEBUG_MODE,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION,\n\tGPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS,\n\tGPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION,\n\tGPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY,\n\tGPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BSDF_MIS,\n\tGPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BILINEAR_FILTERING,\n\n\tGPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY,\n\n\tGPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY,\n\tGPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY,\n\tGPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE,\n\tGPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY,\n\tGPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS,\n\tGPUKernelCompilerOptions::RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY,\n\tGPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING,\n\tGPUKernelCompilerOptions::RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY,\n\tGPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT,\n\tGPUKernelCompilerOptions::RESTIR_DI_DO_OPTIMAL_VISIBILITY_SAMPLING,\n\n\tGPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY,\n\tGPUKernelCom
pilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT,\n\tGPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY,\n\tGPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS,\n\tGPUKernelCompilerOptions::RESTIR_GI_DO_OPTIMAL_VISIBILITY_SAMPLING,\n\n\tGPUKernelCompilerOptions::GMON_M_SETS_COUNT,\n};\n\nGPUKernelCompilerOptions::GPUKernelCompilerOptions()\n{\n\t// Mandatory options that every kernel must have so we're\n\t// adding them here with their default values\n\tm_options_macro_map[GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL] = std::make_shared<int>(UseSharedStackBVHTraversal);\n\tm_options_macro_map[GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE] = std::make_shared<int>(SharedStackBVHTraversalSize);\n\tm_options_macro_map[GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_BLOCK_SIZE] = std::make_shared<int>(KernelWorkgroupThreadCount);\n\tm_options_macro_map[GPUKernelCompilerOptions::DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE] = std::make_shared<int>(DoFirstBounceWarpDirectionReuse);\n\tm_options_macro_map[GPUKernelCompilerOptions::DISPLAY_ONLY_SAMPLE_N] = std::make_shared<int>(DisplayOnlySampleN);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::BSDF_OVERRIDE] = std::make_shared<int>(BSDFOverride);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DIFFUSE_LOBE] = std::make_shared<int>(PrincipledBSDFDiffuseLobe);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION] = std::make_shared<int>(PrincipledBSDFDoEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION] = std::make_shared<int>(PrincipledBSDFDoGlassEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION] = std::make_shared<int>(PrincipledBSDFDoClearcoatEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION] = 
std::make_shared<int>(PrincipledBSDFDoMetallicEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION] = std::make_shared<int>(PrincipledBSDFDoMetallicFresnelEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION] = std::make_shared<int>(PrincipledBSDFDoSpecularEnergyCompensation);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION] = std::make_shared<int>(PrincipledBSDFDeltaDistributionEvaluationOptimization);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL] = std::make_shared<int>(PrincipledBSDFSampleGlossyBasedOnFresnel);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL] = std::make_shared<int>(PrincipledBSDFSampleCoatBasedOnFresnel);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION] = std::make_shared<int>(PrincipledBSDFDoMicrofacetRegularization);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION] = std::make_shared<int>(PrincipledBSDFDoMicrofacetRegularizationConsistentParameterization);\n\tm_options_macro_map[GPUKernelCompilerOptions::PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC] = std::make_shared<int>(PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic);\n\tm_options_macro_map[GPUKernelCompilerOptions::GGX_SAMPLE_FUNCTION] = std::make_shared<int>(PrincipledBSDFAnisotropicGGXSampleFunction);\n\tm_options_macro_map[GPUKernelCompilerOptions::NESTED_DIELETRCICS_STACK_SIZE_OPTION] = std::make_shared<int>(NestedDielectricsStackSize);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::TRIANGLE_POINT_SAMPLING_STRATEGY] = 
std::make_shared<int>(TrianglePointSamplingStrategy);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_DO_LIGHT_PRESAMPLING] = std::make_shared<int>(ReGIR_GridFillDoLightPresampling);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY] = std::make_shared<int>(ReGIR_GridFillLightSamplingBaseStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_VISIBILITY] = std::make_shared<int>(ReGIR_GridFillTargetFunctionVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM] = std::make_shared<int>(ReGIR_GridFillTargetFunctionCosineTerm);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM_LIGHT_SOURCE] = std::make_shared<int>(ReGIR_GridFillTargetFunctionCosineTermLightSource);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_PRIMARY_HITS_TARGET_FUNCTION_BSDF] = std::make_shared<int>(ReGIR_GridFillPrimaryHitsTargetFunctionBSDF);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_SECONDARY_HITS_TARGET_FUNCTION_BSDF] = std::make_shared<int>(ReGIR_GridFillSecondaryHitsTargetFunctionBSDF);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION] = std::make_shared<int>(ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION] = std::make_shared<int>(ReGIR_GridFillSpatialReuse_AccumulatePreIntegration);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_VISIBILITY] = std::make_shared<int>(ReGIR_ShadingResamplingTargetFunctionVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY] = 
std::make_shared<int>(ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESMAPLING_JITTER_CANONICAL_CANDIDATES] = std::make_shared<int>(ReGIR_ShadingResamplingJitterCanonicalCandidates);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_BSDF_MIS] = std::make_shared<int>(ReGIR_ShadingResamplingDoBSDFMIS);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_MIS_PAIRWISE_MIS] = std::make_shared<int>(ReGIR_ShadingResamplingDoMISPairwiseMIS);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_SHADE_ALL_SAMPLES] = std::make_shared<int>(ReGIR_ShadingResamplingShadeAllSamples);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_FALLBACK_LIGHT_SAMPLING_STRATEGY] = std::make_shared<int>(ReGIR_FallbackLightSamplingStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE] = std::make_shared<int>(ReGIR_HashGridCollisionResolutionMode);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS] = std::make_shared<int>(ReGIR_HashGridCollisionResolutionMaxSteps);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL] = std::make_shared<int>(ReGIR_HashGridHashSurfaceNormal);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS] = std::make_shared<int>(ReGIR_HashGridHashSurfaceNormalResolutionPrimaryHits);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS] = std::make_shared<int>(ReGIR_HashGridHashSurfaceNormalResolutionSecondaryHits);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_SHADING_JITTER_TRIES] = std::make_shared<int>(ReGIR_ShadingJitterTries);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_ADAPTIVE_ROUGHNESS_GRID_PRECISION] = 
std::make_shared<int>(ReGIR_HashGridAdaptiveRoughnessGridPrecision);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_HASH_GRID_CONSTANT_GRID_CELL_SIZE] = std::make_shared<int>(ReGIR_HashGridConstantGridCellSize);\n\tm_options_macro_map[GPUKernelCompilerOptions::REGIR_DEBUG_MODE] = std::make_shared<int>(ReGIR_DebugMode);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY] = std::make_shared<int>(DirectLightSamplingStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY] = std::make_shared<int>(DirectLightSamplingBaseStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT] = std::make_shared<int>(DirectLightSamplingNEESampleCount);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS] = std::make_shared<int>(DirectLightUseNEEPlusPlus);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE] = std::make_shared<int>(DirectLightUseNEEPlusPlusRR);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED] = std::make_shared<int>(DirectLightNEEPlusPlusDisplayShadowRaysDiscarded);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED_BOUNCE] = std::make_shared<int>(DirectLightNEEPlusPlusDisplayShadowRaysDiscardedBounce);\n\tm_options_macro_map[GPUKernelCompilerOptions::NEE_PLUS_PLUS_LINEAR_PROBING_STEPS] = std::make_shared<int>(NEEPlusPlus_LinearProbingSteps);\n\tm_options_macro_map[GPUKernelCompilerOptions::NEE_PLUS_PLUS_DEBUG_MODE] = std::make_shared<int>(NEEPlusPlusDebugMode);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION] = std::make_shared<int>(DirectLightSamplingDeltaDistributionOptimization);\n\tm_options_macro_map[GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS] = 
std::make_shared<int>(DirectLightSamplingAllowBackfacingLights);\n\tm_options_macro_map[GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION] = std::make_shared<int>(RISUseVisiblityTargetFunction);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY] = std::make_shared<int>(EnvmapSamplingStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BSDF_MIS] = std::make_shared<int>(EnvmapSamplingDoBSDFMIS);\n\tm_options_macro_map[GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BILINEAR_FILTERING] = std::make_shared<int>(EnvmapSamplingDoBilinearFiltering);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY] = std::make_shared<int>(PathSamplingStrategy);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY] = std::make_shared<int>(ReSTIR_DI_InitialTargetFunctionVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY] = std::make_shared<int>(ReSTIR_DI_SpatialTargetFunctionVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE] = std::make_shared<int>(ReSTIR_DI_DoVisibilityReuse);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY] = std::make_shared<int>(ReSTIR_DI_BiasCorrectionUseVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS] = std::make_shared<int>(ReSTIR_DI_BiasCorrectionWeights);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY] = std::make_shared<int>(ReSTIR_DI_LaterBouncesSamplingStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING] = std::make_shared<int>(ReSTIR_DI_DoLightPresampling);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY] = 
std::make_shared<int>(ReSTIR_DI_LightPresamplingStrategy);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT] = std::make_shared<int>(ReSTIR_DI_SpatialDirectionalReuseBitCount);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_DI_DO_OPTIMAL_VISIBILITY_SAMPLING] = std::make_shared<int>(ReSTIR_DI_DoOptimalVisibilitySampling);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY] = std::make_shared<int>(ReSTIR_GI_SpatialTargetFunctionVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT] = std::make_shared<int>(ReSTIR_GI_SpatialDirectionalReuseBitCount);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY] = std::make_shared<int>(ReSTIR_GI_BiasCorrectionUseVisibility);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS] = std::make_shared<int>(ReSTIR_GI_BiasCorrectionWeights);\n\tm_options_macro_map[GPUKernelCompilerOptions::RESTIR_GI_DO_OPTIMAL_VISIBILITY_SAMPLING] = std::make_shared<int>(ReSTIR_GI_DoOptimalVisibilitySampling);\n\n\tm_options_macro_map[GPUKernelCompilerOptions::GMON_M_SETS_COUNT] = std::make_shared<int>(GMoNMSetsCount);\n\t\n\t// Making sure we didn't forget to fill the ALL_MACROS_NAMES vector with all the options that exist\n\tif (GPUKernelCompilerOptions::ALL_MACROS_NAMES.size() != m_options_macro_map.size())\n\t\tUtils::debugbreak();\n}\n\nGPUKernelCompilerOptions::GPUKernelCompilerOptions(const GPUKernelCompilerOptions& other)\n{\n\t*this = other.deep_copy();\n}\n\nGPUKernelCompilerOptions& GPUKernelCompilerOptions::operator=(const GPUKernelCompilerOptions& other)\n{\n\tm_options_macro_map = other.m_options_macro_map;\n\tm_custom_macro_map = other.m_custom_macro_map;\n\n\treturn *this;\n}\n\nGPUKernelCompilerOptions GPUKernelCompilerOptions::deep_copy() const\n{\n\tGPUKernelCompilerOptions out;\n\tout.clear();\n\n\tfor 
(auto& pair : m_options_macro_map)\n\t\t// Creating new shared ptr for the copy\n\t\tout.m_options_macro_map[pair.first] = std::make_shared<int>(*pair.second);\n\n\tfor (auto& pair : m_custom_macro_map)\n\t\t// Creating new shared ptr for the copy\n\t\tout.m_custom_macro_map[pair.first] = std::make_shared<int>(*pair.second);\n\n\treturn out;\n}\n\nstd::vector<std::string> GPUKernelCompilerOptions::get_all_macros_as_std_vector_string()\n{\n\tstd::vector<std::string> macros;\n\n\tfor (auto macro_key_value : m_options_macro_map)\n\t\tmacros.push_back(\"-D \" + macro_key_value.first + \"=\" + std::to_string(*macro_key_value.second));\n\n\tfor (auto macro_key_value : m_custom_macro_map)\n\t\tmacros.push_back(\"-D \" + macro_key_value.first + \"=\" + std::to_string(*macro_key_value.second));\n\n\treturn macros;\n}\n\nstd::vector<std::string> GPUKernelCompilerOptions::get_relevant_macros_as_std_vector_string(const GPUKernel* kernel) const\n{\n\tstd::vector<std::string> macros;\n\n\t// Looping on all the options macros and checking if the kernel uses that option macro,\n\t// only adding the macro to the returned vector if the kernel uses that option macro\n\tfor (auto macro_key_value : m_options_macro_map)\n\t\tif (kernel->uses_macro(macro_key_value.first))\n\t\t\tmacros.push_back(\"-D \" + macro_key_value.first + \"=\" + std::to_string(*macro_key_value.second));\n\n\t// Adding all the custom macros without conditions\n\tfor (auto macro_key_value : m_custom_macro_map)\n\t\tmacros.push_back(\"-D \" + macro_key_value.first + \"=\" + std::to_string(*macro_key_value.second));\n\n\tstd::vector<std::string> additional_macros = kernel->get_additional_compiler_macros();\n\tfor (const std::string& additional_macro : additional_macros)\n\t\tmacros.push_back(additional_macro);\n\n\treturn macros;\n}\n\nvoid GPUKernelCompilerOptions::set_macro_value(const std::string& name, int value)\n{\n\tif (ALL_MACROS_NAMES.find(name) != ALL_MACROS_NAMES.end())\n\t{\n\t\tif 
(m_options_macro_map.find(name) != m_options_macro_map.end())\n\t\t\t// If you could find the name in the options-macro, setting its value\n\t\t\t*m_options_macro_map[name] = value;\n\t\telse\n\t\t\t// Otherwise, creating it\n\t\t\tm_options_macro_map[name] = std::make_shared<int>(value);\n\t}\n\telse\n\t{\n\t\t// Otherwise, this is a user defined macro, putting it in the custom macro map\n\t\tif (m_custom_macro_map.find(name) != m_custom_macro_map.end())\n\t\t\t// Updating the macro's value if it already exists\n\t\t\t*m_custom_macro_map[name] = value;\n\t\telse\n\t\t\t// Creating it otherwise\n\t\t\tm_custom_macro_map[name] = std::make_shared<int>(value);\n\t}\n}\n\nvoid GPUKernelCompilerOptions::remove_macro(const std::string& name)\n{\n\t// Only removing from the custom macro map because we cannot remove the options-macro\n\tm_custom_macro_map.erase(name);\n}\n\nbool GPUKernelCompilerOptions::has_macro(const std::string& name)\n{\n\t// Only checking the custom macro map because we cannot remove the options-macro so it makes\n\t// no sense to check whether this instance has the macro \"InteriorStackStrategy\"\n\t// for example, it will always be yes\n\treturn m_custom_macro_map.find(name) != m_custom_macro_map.end();\n}\n\nint GPUKernelCompilerOptions::get_macro_value(const std::string& name) const\n{\n\tauto find = m_options_macro_map.find(name);\n\n\tif (find == m_options_macro_map.end())\n\t{\n\t\t// Wasn't found in the options-macro, trying in the custom macros\n\t\tauto find_custom = m_custom_macro_map.find(name);\n\t\tif (find_custom == m_custom_macro_map.end())\n\t\t\treturn std::numeric_limits<int>::min();\n\t\telse\n\t\t\treturn *find_custom->second;\n\t}\n\telse\n\t\treturn *find->second;\n}\n\nconst std::shared_ptr<int> GPUKernelCompilerOptions::get_pointer_to_macro_value(const std::string& name) const\n{\n\tauto find = m_options_macro_map.find(name);\n\n\tif (find == m_options_macro_map.end())\n\t{\n\t\t// Wasn't found in the options-macro, trying in 
the custom macros\n\t\tauto find_custom = m_custom_macro_map.find(name);\n\t\tif (find_custom == m_custom_macro_map.end())\n\t\t\treturn nullptr;\n\t\telse\n\t\t\treturn find_custom->second;\n\t}\n\telse\n\t\treturn find->second;\n}\n\nint* GPUKernelCompilerOptions::get_raw_pointer_to_macro_value(const std::string& name)\n{\n\tstd::shared_ptr<int> pointer = get_pointer_to_macro_value(name);\n\tif (pointer != nullptr)\n\t\treturn pointer.get();\n\n\treturn nullptr;\n}\n\nvoid GPUKernelCompilerOptions::set_pointer_to_macro(const std::string& name, std::shared_ptr<int> pointer_to_value)\n{\n\tauto find = m_options_macro_map.find(name);\n\n\tif (find == m_options_macro_map.end())\n\t\t// Wasn't found in the options-macro, adding/setting it in the custom macro map\n\t\tm_custom_macro_map[name] = pointer_to_value;\n\telse\n\t\tm_options_macro_map[name] = pointer_to_value;\n}\n\nconst std::unordered_map<std::string, std::shared_ptr<int>>& GPUKernelCompilerOptions::get_options_macro_map() const\n{\n\treturn m_options_macro_map;\n}\n\nconst std::unordered_map<std::string, std::shared_ptr<int>>& GPUKernelCompilerOptions::get_custom_macro_map() const\n{\n\treturn m_custom_macro_map;\n}\n\nvoid GPUKernelCompilerOptions::clear()\n{\n\tm_custom_macro_map.clear();\n\tm_options_macro_map.clear();\n}\n\nvoid GPUKernelCompilerOptions::apply_onto(GPUKernelCompilerOptions& other)\n{\n\tfor (auto& pair : m_options_macro_map)\n\t{\n\t\tif (other.m_options_macro_map.find(pair.first) == other.m_options_macro_map.end())\n\t\t\t// The option doesn't exist, we need to create the shared ptr\n\t\t\tother.m_options_macro_map[pair.first] = std::make_shared<int>(*pair.second);\n\t\telse\n\t\t\t// No need to create a shared ptr, we can just copy the value\n\t\t\t*other.m_options_macro_map[pair.first] = *pair.second;\n\t}\n\n\tfor (auto& pair : m_custom_macro_map)\n\t{\n\t\tif (other.m_custom_macro_map.find(pair.first) == other.m_custom_macro_map.end())\n\t\t\t// The option doesn't exist, we need 
to create the shared ptr\n\t\t\tother.m_custom_macro_map[pair.first] = std::make_shared<int>(*pair.second);\n\t\telse\n\t\t\t// No need to create a shared ptr, we can just copy the value\n\t\t\t*other.m_custom_macro_map[pair.first] = *pair.second;\n\t}\n}\n"
  },
  {
    "path": "src/Compiler/GPUKernelCompilerOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_KERNEL_OPTIONS_H\n#define GPU_KERNEL_OPTIONS_H\n\n#include <memory>\n#include <string>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\nclass GPUKernel;\n\nclass GPUKernelCompilerOptions\n{\npublic:\n\tstatic const std::string USE_SHARED_STACK_BVH_TRAVERSAL;\n\tstatic const std::string SHARED_STACK_BVH_TRAVERSAL_BLOCK_SIZE;\n\tstatic const std::string SHARED_STACK_BVH_TRAVERSAL_SIZE;\n\tstatic const std::string DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE;\n\tstatic const std::string DISPLAY_ONLY_SAMPLE_N;\n\n\tstatic const std::string BSDF_OVERRIDE;\n\tstatic const std::string PRINCIPLED_BSDF_DIFFUSE_LOBE;\n\tstatic const std::string PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION;\n\tstatic const std::string PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION;\n\tstatic const std::string PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL;\n\tstatic const std::string PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL;\n\tstatic const std::string PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION;\n\tstatic const std::string PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION;\n\tstatic const std::string PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC;\n\tstatic const std::string GGX_SAMPLE_FUNCTION;\n\tstatic const std::string NESTED_DIELETRCICS_STACK_SIZE_OPTION;\n\n\tstatic const std::string TRIANGLE_POINT_SAMPLING_STRATEGY;\n\n\tstatic const std::string 
REGIR_GRID_FILL_DO_LIGHT_PRESAMPLING;\n\tstatic const std::string REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY;\n\tstatic const std::string REGIR_GRID_FILL_TARGET_FUNCTION_VISIBILITY;\n\tstatic const std::string REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM;\n\tstatic const std::string REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM_LIGHT_SOURCE;\n\tstatic const std::string REGIR_GRID_FILL_PRIMARY_HITS_TARGET_FUNCTION_BSDF;\n\tstatic const std::string REGIR_GRID_FILL_SECONDARY_HITS_TARGET_FUNCTION_BSDF;\n\tstatic const std::string REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION;\n\tstatic const std::string REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION;\n\tstatic const std::string REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_VISIBILITY;\n\tstatic const std::string REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY;\n\tstatic const std::string REGIR_SHADING_RESMAPLING_JITTER_CANONICAL_CANDIDATES;\n\tstatic const std::string REGIR_SHADING_RESAMPLING_DO_BSDF_MIS;\n\tstatic const std::string REGIR_SHADING_RESAMPLING_DO_MIS_PAIRWISE_MIS;\n\tstatic const std::string REGIR_SHADING_RESAMPLING_SHADE_ALL_SAMPLES;\n\tstatic const std::string REGIR_FALLBACK_LIGHT_SAMPLING_STRATEGY;\n\tstatic const std::string REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE;\n\tstatic const std::string REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS;\n\tstatic const std::string REGIR_HASH_GRID_HASH_SURFACE_NORMAL;\n\tstatic const std::string REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS;\n\tstatic const std::string REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS;\n\tstatic const std::string REGIR_SHADING_JITTER_TRIES;\n\tstatic const std::string REGIR_HASH_GRID_ADAPTIVE_ROUGHNESS_GRID_PRECISION;\n\tstatic const std::string REGIR_HASH_GRID_CONSTANT_GRID_CELL_SIZE;\n\tstatic const std::string REGIR_DEBUG_MODE;\n\n\tstatic const std::string DIRECT_LIGHT_SAMPLING_STRATEGY;\n\tstatic const std::string 
DIRECT_LIGHT_SAMPLING_BASE_STRATEGY;\n\tstatic const std::string DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT;\n\tstatic const std::string DIRECT_LIGHT_USE_NEE_PLUS_PLUS;\n\tstatic const std::string DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE;\n\tstatic const std::string DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED;\n\tstatic const std::string DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED_BOUNCE;\n\tstatic const std::string NEE_PLUS_PLUS_LINEAR_PROBING_STEPS;\n\tstatic const std::string NEE_PLUS_PLUS_DEBUG_MODE;\n\tstatic const std::string DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION;\n\tstatic const std::string DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS;\n\tstatic const std::string RIS_USE_VISIBILITY_TARGET_FUNCTION;\n\n\tstatic const std::string ENVMAP_SAMPLING_STRATEGY;\n\tstatic const std::string ENVMAP_SAMPLING_DO_BSDF_MIS;\n\tstatic const std::string ENVMAP_SAMPLING_DO_BILINEAR_FILTERING;\n\n\tstatic const std::string PATH_SAMPLING_STRATEGY;\n\n\tstatic const std::string RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY;\n\tstatic const std::string RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY;\n\tstatic const std::string RESTIR_DI_DO_VISIBILITY_REUSE;\n\tstatic const std::string RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY;\n\tstatic const std::string RESTIR_DI_BIAS_CORRECTION_WEIGHTS;\n\tstatic const std::string RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY;\n\tstatic const std::string RESTIR_DI_DO_LIGHT_PRESAMPLING;\n\tstatic const std::string RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY;\n\tstatic const std::string RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT;\n\tstatic const std::string RESTIR_DI_DO_OPTIMAL_VISIBILITY_SAMPLING;\n\n\tstatic const std::string RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY;\n\tstatic const std::string RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT;\n\tstatic const std::string RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY;\n\tstatic const std::string RESTIR_GI_BIAS_CORRECTION_WEIGHTS;\n\tstatic const 
std::string RESTIR_GI_DO_OPTIMAL_VISIBILITY_SAMPLING;\n\t\n\tstatic const std::string GMON_M_SETS_COUNT;\n\n\tstatic const std::unordered_set<std::string> ALL_MACROS_NAMES;\n\n\tGPUKernelCompilerOptions();\n\tGPUKernelCompilerOptions(const GPUKernelCompilerOptions& other);\n\n\t/**\n\t * Shallow copy of the options of 'other' into 'this'\n\t * \n\t * The shared_ptr of the options of 'other' will be shared with 'this': \n\t * this means that if changing the value of \"OPTION_1\" in 'other', \n\t * the value of \"OPTION_1\" will also change in 'this'.\n\t * \n\t * If this is exactly the behavior that you don't want, have a look at 'deep_copy'\n\t */\n\tGPUKernelCompilerOptions& operator=(const GPUKernelCompilerOptions& other);\n\t/**\n\t * Returns a new GPUKernelCompilerOptions object that has the same option values as 'this'\n\t * but with different shared_ptr. This means that if changing the value of \"OPTION_1\" in 'other', \n\t * the value of \"OPTION_1\" will not change in the new object returned by this function.\n\t */\n\tGPUKernelCompilerOptions deep_copy() const;\n\n\t/**\n\t * Gets a list of all the compiler options of the form { \"-D InteriorStackStrategy=1\", ... }\n\t * that can directly be passed to the kernel compiler.\n\t * \n\t * The returned options do not contain additional include directories.\n\t * Additional include directories are not considered options.\n\t */\n\tstd::vector<std::string> get_all_macros_as_std_vector_string();\n\n\t/**\n\t * Same as get_all_macros_as_std_vector_string() but the returned vector doesn't contain\n\t * the macros that do not apply to the kernel given in parameter.\n\t * \n\t * For example, the camera rays kernel doesn't care about whether our direct lighting\n\t * strategy is MIS, RIS, ReSTIR DI, ... 
so if a camera ray kernel is given in parameter\n\t * the returned vector will not contain the macro for the direct lighting strategy.\n\t * Same logic for the other macros defined in KernelOptions.h\n\t * \n\t * The returned vector always contains all the \"custom\" macros manually defined through\n\t * a call to 'set_macro_value()' (unless the macro changed through 'set_macro_value()' is an\n\t * option macro defined in KernelOptions.h as defined above)\n\t * \n\t * The returned vector also contains all the additional compiler macros that were added\n\t * to the kernel by calling 'kernel.add_additional_macro_for_compilation()'\n\t */\n\tstd::vector<std::string> get_relevant_macros_as_std_vector_string(const GPUKernel* kernel) const;\n\n\t/**\n\t * Replace the value of the macro if it has already been added previous to this call.\n\t * If the macro doesn't exist in these compiler options, it is added to the custom\n\t * options map.\n\t * \n\t * The 'name' parameter is expected to be given without the '-D' macro prefix commonly \n\t * given to compilers.\n\t * For example, if you want to define a macro \"MyMacro\" equal to 1, you simply\n\t * call set_macro_value(\"MyMacro\", 1).\n\t * The addition of the -D prefix will be added internally.\n\t */\n\tvoid set_macro_value(const std::string& name, int value);\n\n\t/**\n\t * Removes a macro from the list given to the compiler\n\t */\n\tvoid remove_macro(const std::string& name);\n\n\t/**\n\t * Returns true if the given macro is defined. 
False otherwise\n\t */\n\tbool has_macro(const std::string& name);\n\n\t/** \n\t * Gets the value of a macro or std::numeric_limits<int>::min() if the macro isn't set\n\t */\n\tint get_macro_value(const std::string& name) const;\n\n\t/**\n\t * Returns a pointer to the value of a macro given its name.\n\t * \n\t * Useful for use with ImGui for example.\n\t * \n\t * nullptr is returned if the option doesn't exist (set_macro_value() wasn't called yet)\n\t */\n\tconst std::shared_ptr<int> get_pointer_to_macro_value(const std::string& name) const;\n\tint* get_raw_pointer_to_macro_value(const std::string& name);\n\n\t/**\n\t * Links the value of the macro 'name' with the given pointer such that if the value at the given\n\t * 'pointer_to_value' is modified, the value of the same macro in this instance of GPUKernelCompilerOptions\n\t * will also be modified to the same value\n\t */\n\tvoid set_pointer_to_macro(const std::string& name, std::shared_ptr<int> pointer_to_value);\n\n\t/**\n\t * Returns the map that stores the macro names with their associated values\n\t */\n\tconst std::unordered_map<std::string, std::shared_ptr<int>>& get_options_macro_map() const;\n\n\t/**\n\t * Returns the map that stores the custom macro names with their associated values\n\t */\n\tconst std::unordered_map<std::string, std::shared_ptr<int>>& get_custom_macro_map() const;\n\n\t/**\n\t * Removes all options from this instance\n\t */\n\tvoid clear();\n\n\t/**\n\t * Overrides any option value of 'other' with the value of the corresponding option of this instance\n\t * If the option doesn't exist in other, it is added\n\t */\n\tvoid apply_onto(GPUKernelCompilerOptions& other);\n\nprivate:\n\t// Maps the name of the macro to its value. 
\n\t// Example: [\"InteriorStackStrategy\", 1]\n\t// \n\t// This \"options macro\" map only contains the macro as defined in KernelOptions.h\n\t// Those are the macros that control the compilation of the kernels to enable / disable\n\t// certain behavior of the path tracer by recompilation (to save registers by eliminating code)\n\t//\n\t// This macro map and the 'custom_macro_map' contain pointers to int for their values\n\t// because we want to be able to synchronize the value of the options with\n\t// another instance of GPUKernelCompilerOptions. This requires having the value\n\t// of our macro point to the value of the other GPUKernelCompilerOptions instance\n\t// and we need pointers for that\n\tstd::unordered_map<std::string, std::shared_ptr<int>> m_options_macro_map;\n\n\t// This \"custom macro\" map contains the macros given by the user with set_macro_value().\n\t// Any macro that isn't defined in KernelOptions.h will be found in this custom macro map\n\tstd::unordered_map<std::string, std::shared_ptr<int>> m_custom_macro_map;\n};\n\n\n#endif"
  },
  {
    "path": "src/Device/functions/FilterFunction.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_FUNCTIONS_FILTER_FUNCTION_H\n#define DEVICE_FUNCTIONS_FILTER_FUNCTION_H\n\n#include \"Device/functions/FilterFunctionPayload.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Material.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n/**\n * This filter function handles self intersection avoidance and alpha testing\n * \n * return FALSE if the intersection is ACCEPTED\n * return true if the intersection is rejected\n */\nHIPRT_DEVICE HIPRT_INLINE bool filter_function(const hiprtRay&, const void*, void* payld, const hiprtHit& hit)\n{\n\tFilterFunctionPayload* payload = reinterpret_cast<FilterFunctionPayload*>(payld);\n\n\tint global_triangle_index_hit;\n\tif (payload->simplified_light_ray)\n\t\t// If the ray is shot in the BVH containing only the emissive triangles, the hit.primID is the index of the emissive triangle in that BVH,\n\t\t// not the index of the emissive triangle in the whole scene, which 'payload->last_hit_primitive_index' is\n\t\t//\n\t\t// So we need to 'convert' the hit index in the light BVH to a hit index in the whole scene BVH and do\n\t\t// the comparison against that\n\t\tglobal_triangle_index_hit = payload->render_data->buffers.emissive_triangles_primitive_indices_and_emissive_textures[hit.primID];\n\telse\n\t\tglobal_triangle_index_hit = hit.primID;\n\n\tif (global_triangle_index_hit == payload->last_hit_primitive_index)\n\t\t// This is a self-intersection, filtering it out\n\t\t//\n\t\t// Triangles are planar so one given triangle can\n\t\t// never be intersected twice in a row (unless we're absolutely\n\t\t// perfectly parallel to the triangle but let's ignore that...)\n\t\t//\n\t\t// This self-intersection avoidance only works for planar primitives\n\t\treturn true;\n\n\tif 
(!payload->render_data->render_settings.do_alpha_testing)\n\t\t// No alpha testing\n\t\treturn false;\n\n\tif (payload->bounce >= payload->render_data->render_settings.alpha_testing_indirect_bounce)\n\t\t// Alpha testing is disabled at the current bounce\n\t\t// \n\t\t// Returning false to indicate an intersection\n\t\treturn false;\n\n\tint material_index = payload->render_data->buffers.material_indices[global_triangle_index_hit];\n\tif (payload->render_data->buffers.material_opaque[material_index])\n\t\t// The material is fully opaque, no need to test further, accept the intersection\n\t\treturn false;\n\n\t// Compositing both the alpha of the base color texture and the material\n\tunsigned short int base_color_texture_index = payload->render_data->buffers.materials_buffer.get_base_color_texture_index(material_index);\n\tfloat base_color_alpha = get_hit_base_color_alpha(*payload->render_data, base_color_texture_index, global_triangle_index_hit, hit.uv);\n\tfloat alpha_opacity = payload->render_data->buffers.materials_buffer.get_alpha_opacity(material_index);\n\tfloat composited_alpha = alpha_opacity * base_color_alpha;\n\n\tif ((*payload->random_number_generator)() < composited_alpha)\n\t\t// Alpha test not passing, the ray is blocked\n\t\treturn false;\n\n\t// No tests stopped the ray, that's not a hit.\n\t// Returning 'true' to filter out the intersection\n\treturn true;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/functions/FilterFunctionPayload.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_FUNCTIONS_FILTER_FUNCTION_PAYLOAD_H\n#define DEVICE_FUNCTIONS_FILTER_FUNCTION_PAYLOAD_H\n\nstruct HIPRTRenderData;\nstruct Xorshift32Generator;\n\nstruct FilterFunctionPayload\n{\n\t// -- Alpha testing payload --\n\tconst HIPRTRenderData* render_data;\n\tXorshift32Generator* random_number_generator;\n\t// -- Alpha testing payload --\n\n\t// What bounce the ray being launched currently is at\n\tint bounce = 0;\n\n\t// -- Self intersection avoidance payload --\n\tint last_hit_primitive_index;\n\tbool simplified_light_ray = false; // Whether or not the ray is shot in the BVH containing only the emissive triangles of the scene\n\t// -- Self intersection avoidance payload --\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/AdaptiveSampling.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_ADAPTIVE_SAMPLING_H\n#define DEVICE_ADAPTIVE_SAMPLING_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_pixel_confidence_interval(const HIPRTRenderData& render_data, int pixel_index, int pixel_sample_count, float& average_luminance)\n{\n    float luminance = render_data.buffers.accumulated_ray_colors[pixel_index].luminance();\n    average_luminance = luminance / (pixel_sample_count + 1);\n\n    float squared_luminance = render_data.aux_buffers.pixel_squared_luminance[pixel_index];\n    float pixel_variance = (squared_luminance - luminance * average_luminance) / (pixel_sample_count + 1);\n\n    return 1.96f * sqrtf(pixel_variance) / sqrtf(pixel_sample_count + 1);\n}\n\n/**\n * pixel_converged is set to true if the given pixel has reached the noise\n * threshold given in render_data.render_settings.stop_pixel_percentage_converged. 
It \n * is set to false otherwise.\n * \n * Returns true if the pixel needs more sample according to adaptive sampling (or if adaptive sampling is disabled).\n * Returns false otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool adaptive_sampling(const HIPRTRenderData& render_data, int pixel_index, bool& pixel_converged)\n{\n    const HIPRTRenderSettings& render_settings = render_data.render_settings;\n    const AuxiliaryBuffers& aux_buffers = render_data.aux_buffers;\n\n    if (!render_settings.has_access_to_adaptive_sampling_buffers())\n        // Adaptive sampling is not on so returning true to indicate\n        // that this pixel is going to need sampling\n        return true;\n\n    if (render_settings.enable_adaptive_sampling)\n    {\n        // Computing pixel convergence according to adaptive sampling to\n        // know whether to keep sampling that pixel or not\n\n        if (aux_buffers.pixel_converged_sample_count[pixel_index] != -1)\n            // Pixel is already converged because we have a value != -1 in the\n            // pixels converged sample count buffer\n            return false;\n\n        int pixel_sample_count = aux_buffers.pixel_sample_count[pixel_index];\n        if (pixel_sample_count > render_settings.adaptive_sampling_min_samples)\n        {\n            float average_luminance;\n            float confidence_interval = get_pixel_confidence_interval(render_data, pixel_index, pixel_sample_count, average_luminance);\n\n            bool pixel_needs_sampling = confidence_interval > render_settings.adaptive_sampling_noise_threshold * average_luminance;\n            if (!pixel_needs_sampling)\n            {\n                if (aux_buffers.pixel_converged_sample_count[pixel_index] == -1)\n                    // Indicates no need to sample anymore by indicating that this pixel has converged\n                    // only if we hadn't indicated the convergence before already\n                    
aux_buffers.pixel_converged_sample_count[pixel_index] = pixel_sample_count;\n\n                return false;\n            }\n        }\n\n        return true;\n    }\n    // Only counting the convergence of pixels according to\n    // the pixel stop noise threshold if adaptive sampling is not enabled\n    //\n    // The rationale is that if we have both adaptive sampling and pixel stop noise threshold \n    // enabled, we probably want to use adaptive sampling only but also stop rendering after \n    // a certain proportion of pixels have converged and we don't actually want to use the \n    // \"stop pixel noise threshold\" but only the \"stop pixel convergence proportion\"\n    else if (render_settings.stop_pixel_noise_threshold > 0.0f && render_settings.use_pixel_stop_noise_threshold)\n    {\n        int pixel_sample_count = aux_buffers.pixel_sample_count[pixel_index];\n\n        float average_luminance;\n        float confidence_interval = get_pixel_confidence_interval(render_data, pixel_index, pixel_sample_count, average_luminance);\n\n        // The value of pixel_converged will be used outside of this function\n        pixel_converged =\n            // Converged enough\n            (confidence_interval <= render_settings.stop_pixel_noise_threshold * average_luminance)\n            // At least 2 samples because we can't evaluate the variance with only 1 sample\n            && (render_settings.sample_number > 1);\n\n        int current_converged_count = aux_buffers.pixel_converged_sample_count[pixel_index];\n        if (pixel_converged && current_converged_count == -1)\n            // If the pixel has converged, storing the number of samples at which it has converged.\n            // We're only storing the number of samples if we hadn't already (if the value in the buffer is -1)\n            aux_buffers.pixel_converged_sample_count[pixel_index] = pixel_sample_count;\n        else if (!pixel_converged)\n            // If the pixel hasn't converged\n            
aux_buffers.pixel_converged_sample_count[pixel_index] = -1;\n    }\n\n    return true;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/AliasTable.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_DEVICE_ALIAS_TABLE_H\n#define DEVICE_INCLUDES_DEVICE_ALIAS_TABLE_H\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\nstruct DeviceAliasTable\n{\n\t HIPRT_HOST_DEVICE int sample(Xorshift32Generator& rng) const\n\t{\n\t\tint random_index = rng.random_index(size);\n\t\tfloat probability = alias_table_probas[random_index];\n\t\tif (rng() > probability)\n\t\t\t// Picking the alias\n\t\t\trandom_index = alias_table_alias[random_index];\n\n\t\treturn random_index;\n\t}\n\n\tint* alias_table_alias = nullptr;\n\tfloat* alias_table_probas = nullptr;\n\n\tfloat sum_elements = 0.0f;\n\tunsigned int size = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/BSDFContext.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_BSDF_CONTEXT_H\n#define DEVICE_INCLUDES_BSDF_CONTEXT_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n#include \"Device/includes/BSDFs/MicrofacetRegularization.h\"\n#include \"Device/includes/RayVolumeState.h\"\n\nstruct BSDFContext\n{\n\tconst DeviceUnpackedEffectiveMaterial& material;\n\tRayVolumeState& volume_state;\n\n\tfloat3 view_direction = make_float3(-1.0f, -1.0f, -1.0f);\n\tfloat3 shading_normal = make_float3(-1.0f, -1.0f, -1.0f);\n\tfloat3 geometric_normal = make_float3(-1.0f, -1.0f, -1.0f);\n\tfloat3 to_light_direction = make_float3(-1.0f, -1.0f, -1.0f);\n\n\tBSDFIncidentLightInfo& incident_light_info;\n\tint current_bounce = 0;\n\n\tfloat accumulated_path_roughness = 0.0f;\n\n\t// Whether or not to modify the volume state of the ray as the BSDF is sampled / evaluated.\n\t//\n\t// For example, if the ray is currently refracting out of a glass material, and 'update_ray_volume_state' == true,\n\t// the ray volume state of the ray will be updated and the glass object will be popped out of the\n\t// nested dielectrics stack\n\tbool update_ray_volume_state = false;\n\n\t// Whether or not to regularize the BSDF when sampling/evaluating it\n\tMicrofacetRegularization::RegularizationMode bsdf_regularization_mode = MicrofacetRegularization::RegularizationMode::NO_REGULARIZATION;\n\n\t/**\n\t * 'to_light_direction' is only needed if evaluating the BSDF // TODO create a separate eval context and sampling context\n\t * 'incident_light_info' should be passed as BSDFIncidentLightInfo::NO_INFO if you don't care about what lobe the BSDF sampled of if you don't have the information about\n\t * what lobe the 'to_light_direction' comes from (during NEE light sampling for example)\n\t */\n\tHIPRT_HOST_DEVICE BSDFContext(const float3& view_direction_, const float3& shading_normal, const 
float3& geometric_normal, const float3& to_light_direction,\n\t\tBSDFIncidentLightInfo& incident_light_info,\n\t\tRayVolumeState& ray_volume_state, bool update_ray_volume_state,\n\t\tconst DeviceUnpackedEffectiveMaterial& material,\n\t\tint current_bounce, float accumulated_path_roughness,\n\t\tMicrofacetRegularization::RegularizationMode regularize_bsdf = MicrofacetRegularization::RegularizationMode::NO_REGULARIZATION) :\n\n\t\tmaterial(material), volume_state(ray_volume_state), \n\t\tview_direction(view_direction_), shading_normal(shading_normal), geometric_normal(geometric_normal), to_light_direction(to_light_direction),\n\t\tincident_light_info(incident_light_info), update_ray_volume_state(update_ray_volume_state),\n\t\tcurrent_bounce(current_bounce), accumulated_path_roughness(accumulated_path_roughness), bsdf_regularization_mode(regularize_bsdf) {}\n};\n\n#endif"
  },
  {
    "path": "src/Device/includes/BSDFs/BSDFIncidentLightInfo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_BSDF_EVAL_INCIDENT_LIGHT_INFO_H\n#define DEVICE_BSDF_EVAL_INCIDENT_LIGHT_INFO_H\n\nenum BSDFIncidentLightInfo\n{\n\t// Default value: nothing is assumed about the incident light direction\n\tNO_INFO = 0,\n\n\t// The additional information below can be used by the bsdf_eval() function to\n\t// avoid evaluating delta lobes (such as a perfectly smooth clearcoat lobe,\n\t// glass lobe, specualr lobe, ...) and save some performance.\n\t//\n\t// In such scenarios, the BSDF evaluation will still be correct because delta distribution\n\t// lobes will evaluate to 0 anyways if they are evaluated with a direction that\n\t// was not sampled from the lobe itself.\n\t//\n\t// For example, consider a clearcoat diffuse lobe. If bsdf_eval() is called with an\n\t// incident light direction that was sampled from the diffuse lobe, the perfectly smooth clearcoat lobe\n\t// is going to have its contribution evaluate to 0 because there is no chance that the sampled\n\t// diffuse direction perfectly aligns with the delta of the smooth clearcoat lobe\n\t//\n\t// Same with all the other lobes that can be delta distributions\n\t//\n\t// Using bit shifts for the values here so that it can be used easily by ReSTIR DI\n\tLIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE = 1 << 1,\n\tLIGHT_DIRECTION_SAMPLED_FROM_FIRST_METAL_LOBE = 1 << 2,\n\tLIGHT_DIRECTION_SAMPLED_FROM_SECOND_METAL_LOBE = 1 << 3,\n\tLIGHT_DIRECTION_SAMPLED_FROM_SPECULAR_LOBE = 1 << 4,\n\tLIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFLECT_LOBE = 1 << 5,\n\tLIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE = 1 << 6,\n\tLIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_LOBE = 1 << 7,\n\tLIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_TRANSMISSION_LOBE = 1 << 8,\n\n\t// This can be used if the incident light direction comes from sampling a light in the scene\n\t// from 
example\n\tLIGHT_DIRECTION_NOT_SAMPLED_FROM_BSDF\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/CookTorrance.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_COOK_TORRANCE_H\n#define DEVICE_COOK_TORRANCE_H\n\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/Material/Material.h\"\n#include \"Device/includes/Sampling.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_normal_distribution(float alpha, float NoH)\n{\n    //To avoid numerical instability when NoH basically == 1, i.e when the\n    //material is a perfect mirror and the normal distribution function is a Dirac\n\n    NoH = hippt::min(NoH, 0.999999f);\n    float alpha2 = alpha * alpha;\n    float NoH2 = NoH * NoH;\n    float b = (NoH2 * (alpha2 - 1.0f) + 1.0f);\n    return alpha2 * M_INV_PI / (b * b);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float G1_schlick_ggx(float k, float dot_prod)\n{\n    return dot_prod / (dot_prod * (1.0f - k) + k);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_smith_masking_shadowing(float roughness_squared, float NoV, float NoL)\n{\n    float k = roughness_squared * 0.5f;\n\n    return G1_schlick_ggx(k, NoL) * G1_schlick_ggx(k, NoV);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float cook_torrance_brdf_pdf(const DeviceUnpackedEffectiveMaterial& material, const float3& view_direction, const float3& to_light_direction, const float3& surface_normal)\n{\n    float3 microfacet_normal = hippt::normalize(view_direction + to_light_direction);\n\n    float alpha = material.roughness * material.roughness;\n\n    float VoH = hippt::max(0.0f, hippt::dot(view_direction, microfacet_normal));\n    float NoH = hippt::max(0.0f, hippt::dot(surface_normal, microfacet_normal));\n    float D = GGX_normal_distribution(alpha, NoH);\n\n    return D * NoH / (4.0f * VoH);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F cook_torrance_brdf(const DeviceUnpackedEffectiveMaterial& material, const float3& to_light_direction, const float3& view_direction, const float3& surface_normal)\n{\n    ColorRGB32F 
brdf_color = ColorRGB32F(0.0f, 0.0f, 0.0f);\n    ColorRGB32F base_color = material.base_color;\n\n    float3 halfway_vector = hippt::normalize(view_direction + to_light_direction);\n\n    float NoV = hippt::max(0.0f, hippt::dot(surface_normal, view_direction));\n    float NoL = hippt::max(0.0f, hippt::dot(surface_normal, to_light_direction));\n    float NoH = hippt::max(0.0f, hippt::dot(surface_normal, halfway_vector));\n    float VoH = hippt::max(0.0f, hippt::dot(halfway_vector, view_direction));\n\n    if (NoV > 0.0f && NoL > 0.0f && NoH > 0.0f)\n    {\n        float metallic = material.metallic;\n        float roughness = material.roughness;\n\n        float alpha = roughness * roughness;\n\n        ////////// Cook Torrance BRDF //////////\n        ColorRGB32F F;\n        float D, G;\n\n        //F0 = 0.04 for dielectrics, 1.0 for metals (approximation)\n        ColorRGB32F F0 = ColorRGB32F(0.04f * (1.0f - metallic)) + metallic * base_color;\n\n        //GGX Distribution function\n        F = fresnel_schlick(F0, VoH);\n        D = GGX_normal_distribution(alpha, NoH);\n        G = GGX_smith_masking_shadowing(alpha, NoV, NoL);\n\n        ColorRGB32F kD = ColorRGB32F(1.0f - metallic); //Metals do not have a base_color part\n        kD = kD * (ColorRGB32F(1.0f) - F);//Only the transmitted light is diffused\n\n        ColorRGB32F diffuse_part = kD * base_color * M_INV_PI;\n        ColorRGB32F specular_part = (F * D * G) / (4.0f * NoV * NoL);\n\n        brdf_color = diffuse_part + specular_part;\n    }\n\n    return brdf_color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F cook_torrance_brdf_importance_sample(const DeviceUnpackedEffectiveMaterial& material, const float3& view_direction, const float3& surface_normal, float3& output_direction, float& pdf, Xorshift32Generator& random_number_generator)\n{\n    pdf = 0.0f;\n\n    float metallic = material.metallic;\n    float roughness = material.roughness;\n    float alpha = roughness * roughness;\n\n    float rand1 = 
random_number_generator();\n    float rand2 = random_number_generator();\n\n    float phi = M_TWO_PI * rand1;\n    float theta = acos((1.0f - rand2) / (rand2 * (alpha * alpha - 1.0f) + 1.0f));\n    float sin_theta = sin(theta);\n\n    // The microfacet normal is sampled in its local space, we'll have to bring it to the space\n    // around the surface normal\n    float3 microfacet_normal_local_space = make_float3(cos(phi) * sin_theta, sin(phi) * sin_theta, cos(theta));\n    float3 microfacet_normal = local_to_world_frame(surface_normal, microfacet_normal_local_space);\n    if (hippt::dot(microfacet_normal, surface_normal) < 0.0f)\n        //The microfacet normal that we sampled was under the surface, this can happen\n        return ColorRGB32F(0.0f);\n    float3 to_light_direction = hippt::normalize(2.0f * hippt::dot(microfacet_normal, view_direction) * microfacet_normal - view_direction);\n    float3 halfway_vector = microfacet_normal;\n    output_direction = to_light_direction;\n\n    ColorRGB32F brdf_color = ColorRGB32F(0.0f, 0.0f, 0.0f);\n    ColorRGB32F base_color = material.base_color;\n\n    float NoV = hippt::max(0.0f, hippt::dot(surface_normal, view_direction));\n    float NoL = hippt::max(0.0f, hippt::dot(surface_normal, to_light_direction));\n    float NoH = hippt::max(0.0f, hippt::dot(surface_normal, halfway_vector));\n    float VoH = hippt::max(0.0f, hippt::dot(halfway_vector, view_direction));\n\n    if (NoV > 0.0f && NoL > 0.0f && NoH > 0.0f)\n    {\n        /////////// Cook Torrance BRDF //////////\n        ColorRGB32F F;\n        float D, G;\n\n        //GGX Distribution function\n        D = GGX_normal_distribution(alpha, NoH);\n\n        //F0 = 0.04 for dielectrics, 1.0 for metals (approximation)\n        ColorRGB32F F0 = ColorRGB32F(0.04f * (1.0f - metallic)) + metallic * base_color;\n        F = fresnel_schlick(F0, VoH);\n        G = GGX_smith_masking_shadowing(alpha, NoV, NoL);\n\n        ColorRGB32F kD = ColorRGB32F(1.0f - metallic); //Metals 
do not have a base_color part\n        kD = kD * (ColorRGB32F(1.0f) - F);//Only the transmitted light is diffused\n\n        ColorRGB32F diffuse_part = kD * base_color * M_INV_PI;\n        ColorRGB32F specular_part = (F * D * G) / (4.0f * NoV * NoL);\n\n        pdf = D * NoH / (4.0f * VoH);\n\n        brdf_color = diffuse_part + specular_part;\n    }\n\n    return brdf_color;\n}\n\n#endif"
  },
  {
    "path": "src/Device/includes/BSDFs/Glass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_GLASS_H\n#define DEVICE_GLASS_H\n\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/Material/Material.h\"\n#include \"Device/includes/Sampling.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F smooth_glass_bsdf(const DeviceUnpackedEffectiveMaterial& material, float3& out_bounce_direction, const float3& ray_direction, float3& surface_normal, float eta_i, float eta_t, float& pdf, Xorshift32Generator& random_generator)\n{\n    // Clamping here because the dot product can eventually returns values less\n    // than -1 or greater than 1 because of precision errors in the vectors\n    // (in previous calculations)\n    float cos_theta_i = hippt::clamp(-1.0f, 1.0f, hippt::dot(surface_normal, -ray_direction));\n\n    if (cos_theta_i < 0.0f)\n    {\n        // We're inside the surface, we're going to flip the eta and the normal for\n        // the calculations that follow\n        // Note that this also flips the normal for the caller of this function\n        // since the normal is passed by reference. 
This is useful since the normal\n        // will be used for offsetting the new ray origin for example\n        cos_theta_i = -cos_theta_i;\n        surface_normal = -surface_normal;\n\n        float temp = eta_i;\n        eta_i = eta_t;\n        eta_t = temp;\n    }\n\n    // Computing the proportion of reflected light using fresnel equations\n    // We're going to use the result to decide whether to refract or reflect the\n    // ray\n    float fresnel_reflect = full_fresnel_dielectric(cos_theta_i, eta_i, eta_t);\n    if (random_generator() <= fresnel_reflect)\n    {\n        // Reflect the ray\n\n        out_bounce_direction = reflect_ray(-ray_direction, surface_normal);\n        pdf = fresnel_reflect;\n\n        return ColorRGB32F(fresnel_reflect) / hippt::dot(surface_normal, out_bounce_direction);\n    }\n    else\n    {\n        // Refract the ray\n\n        float3 refract_direction;\n        refract_ray(-ray_direction, surface_normal, refract_direction, eta_t / eta_i);\n\n        out_bounce_direction = refract_direction;\n        surface_normal = -surface_normal;\n        pdf = 1.0f - fresnel_reflect;\n\n        return ColorRGB32F(1.0f - fresnel_reflect) * material.base_color / hippt::dot(out_bounce_direction, surface_normal);\n    }\n}\n\n#endif"
  },
  {
    "path": "src/Device/includes/BSDFs/Lambertian.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_LAMBERTIAN_H\n#define DEVICE_LAMBERTIAN_H\n\n#include \"Device/includes/ONB.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F lambertian_brdf_eval(const DeviceUnpackedEffectiveMaterial& material, float NoL, float& pdf)\n{\n    pdf = 0.0f;\n\n    if (NoL <= 0.0f)\n        return ColorRGB32F(0.0f);\n\n    pdf = NoL * M_INV_PI;\n    return material.base_color * M_INV_PI;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float lambertian_brdf_pdf(const DeviceUnpackedEffectiveMaterial& material, float NoL)\n{\n    float pdf = 0.0f;\n\n    if (NoL <= 0.0f)\n        return 0.0f;\n\n    return NoL * M_INV_PI;\n}\n\n/**\n * If sampleDirectionOnly is 'true',, this function samples only the BSDF without\n * evaluating the contribution or the PDF of the BSDF. This function will then always return\n * ColorRGB32F(0.0f) and the 'pdf' out parameter will always be set to 0.0f\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F lambertian_brdf_sample(\n    const DeviceUnpackedEffectiveMaterial& material, \n    const float3& shading_normal, float3& sampled_direction, \n    float& pdf, Xorshift32Generator& random_number_generator, BSDFIncidentLightInfo& out_sampled_light_info)\n{\n    sampled_direction = cosine_weighted_sample_around_normal_world_space(shading_normal, random_number_generator);\n    \n    out_sampled_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_LOBE;\n\n    if constexpr (sampleDirectionOnly)\n    {\n        pdf = 0.0f;\n\n        return ColorRGB32F(0.0f);\n    }\n    else\n        return lambertian_brdf_eval(material, hippt::dot(shading_normal, sampled_direction), pdf);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/Microfacet.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_BSDF_MICROFACET_H\n#define DEVICE_BSDF_MICROFACET_H\n\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/BSDFs/MicrofacetEnergyCompensation.h\"\n\n // Clamping value for dot products when evaluating the GGX distribution\n // This helps with fireflies due to numerical imprecisions\n //\n // 1.0e-3f seems indistinguishable from 1.0e-8f (which is closer to\n // \"ground truth\" since we're not clamping as hard) except that 1.0e-8f\n // has a bunch of fireflies / is not very stable at all.\n //\n // So even though 1.0e-3f may seem a bit harsh, it's actually fine\n#define GGX_DOT_PRODUCTS_CLAMP 1.0e-3f\n\n/**\n * Evaluates the GGX anisotropic normal distribution function\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_anisotropic(float alpha_x, float alpha_y, const float3& local_microfacet_normal)\n{\n    float denom = (local_microfacet_normal.x * local_microfacet_normal.x) / (alpha_x * alpha_x) +\n        (local_microfacet_normal.y * local_microfacet_normal.y) / (alpha_y * alpha_y) +\n        (local_microfacet_normal.z * local_microfacet_normal.z);\n\n    return 1.0f / (M_PI * alpha_x * alpha_y * denom * denom);\n}\n\n/**\n * Evaluates the visible normal distribution function with GGX as\n * the normal disitrbution function\n *\n * Reference: [Sampling the GGX Distribution of Visible Normals, Heitz, 2018]\n * Equation 3\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_anisotropic_vndf(float D, float G1V, const float3& local_view_direction, const float3& local_microfacet_normal)\n{\n    float HoL = hippt::max(GGX_DOT_PRODUCTS_CLAMP, hippt::dot(local_view_direction, local_microfacet_normal));\n    return G1V * D * HoL / local_view_direction.z;\n}\n\n/**\n * Lambda function for the denominator of the G1 Smith masking/shadowing functions\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float 
G1_Smith_lambda(float alpha_x, float alpha_y, const float3& local_direction)\n{\n    float ax = local_direction.x * alpha_x;\n    float ay = local_direction.y * alpha_y;\n\n    return (-1.0f + sqrt(1.0f + (ax * ax + ay * ay) / (local_direction.z * local_direction.z))) * 0.5f;\n}\n\n/**\n * G1 Smith masking/shadowing (depending on whether local_direction is wo or wi) function\n *\n * Reference: [Understanding the Masking-Shadowing Function in Microfacet-Based BRDFs, Heitz, 2014]\n * Equation 43\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float G1_Smith(float alpha_x, float alpha_y, const float3& local_direction)\n{\n    float lambda = G1_Smith_lambda(alpha_x, alpha_y, local_direction);\n\n    return 1.0f / (1.0f + lambda);\n}\n\n/**\n * 'incident_light_direction_is_from_GGX_sample' should be true if the 'local_to_light_direction' given comes from\n * sampling the microfacet distribution that is being evaluated by this function call\n * \n * false otherwise (if 'local_to_light_direction' comes from light sampling NEE, or sampling another lobe of the BSDF, ...).\n * This parameter only matters if the BRDF is perfectly smooth: roughness < MaterialConstants::ROUGHNESS_CLAMP\n */\ntemplate <bool useMultipleScatteringEnergyCompensation>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F torrance_sparrow_GGX_eval_reflect(const HIPRTRenderData& render_data, float material_roughness, float material_anisotropy, bool material_do_energy_compensation, const ColorRGB32F& F, \n                                                                             const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector, \n                                                                             float& out_pdf, MaterialUtils::SpecularDeltaReflectionSampled incident_light_direction_is_from_GGX_sample,\n                                                                             int current_bounce)\n{\n    out_pdf = -1.0f;\n    return 
ColorRGB32F(-1.0f);\n}\n\n/**\n * Evaluates the Torrance Sparrow BRDF 'FDG / 4.NoL.NoV' with the\n * GGX as the microfacet distribution\n * function with single scattering (no energy compensation)\n *\n * 'incident_light_direction_is_from_GGX_sample' should be true if the 'local_to_light_direction' given comes from\n * sampling the microfacet distribution that is being evaluated by this function call\n * \n * false otherwise (if 'local_to_light_direction' comes from light sampling NEE, or sampling another lobe of the BSDF, ...).\n * This parameter only matters if the BRDF is perfectly smooth: roughness < MaterialConstants::ROUGHNESS_CLAMP\n * \n * Reference: [Sampling the GGX Distribution of Visible Normals, Heitz, 2018]\n * Equation 15\n */\ntemplate <>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F torrance_sparrow_GGX_eval_reflect<0>(const HIPRTRenderData& render_data, float material_roughness, float material_anisotropy, bool material_do_energy_compensation, const ColorRGB32F& F,\n                                                                                const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector, \n                                                                                float& out_pdf, MaterialUtils::SpecularDeltaReflectionSampled incident_light_direction_is_from_GGX_sample,\n                                                                                int current_bounce)\n{\n    out_pdf = 0.0f;\n\n    if (MaterialUtils::is_perfectly_smooth(material_roughness) && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE)\n    {\n        // Fast path for perfectly specular BRDF\n        if (incident_light_direction_is_from_GGX_sample == MaterialUtils::SpecularDeltaReflectionSampled::SPECULAR_PEAK_NOT_SAMPLED)\n            // For a perfectly smooth GGX distribution (a delta distribution), anything other than a\n            // perfectly sampled reflection direction is 
going to yield 0 contribution\n            return ColorRGB32F(0.0f);\n        else\n        {\n            if (hippt::dot(reflect_ray(local_view_direction, make_float3(0.0f, 0.0f, 1.0f)), local_to_light_direction) < MaterialConstants::DELTA_DISTRIBUTION_ALIGNEMENT_THRESHOLD)\n            {\n                // Just an additional check that we indeed have the incident light\n                // direction aligned with the perfect reflection direction\n                //\n                // This additional check is mainly useful for ReSTIR where we need\n                // to evaluate the BRDF with a sample that may have been sampled from\n                // a delta distribution at a neighbor (so it checks all the boxes for the shortcut\n                // and we could just quickly return MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE\n                // but because that sample wasn't sampled at the current pixel, there\n                // is a good chance that it doesn't actually align with the perfect\n                // reflection direction = it doesn't align with the specular peak = 0 contribution\n\n                out_pdf = 0.0f;\n                return ColorRGB32F(0.0f);\n            }\n\n            out_pdf = MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE;\n            return ColorRGB32F(MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE) * F / hippt::abs(local_to_light_direction.z);\n        }\n    }\n\n    if (local_to_light_direction.z < 0.0f)\n        // A direction that is below the surface is invalid for a microfacet ** BRDF **\n        return ColorRGB32F(0.0f);\n\n    float alpha_x;\n    float alpha_y;\n    MaterialUtils::get_alphas(material_roughness, material_anisotropy, alpha_x, alpha_y);\n\n    // GGX normal distribution\n    float D = GGX_anisotropic(alpha_x, alpha_y, local_halfway_vector);\n\n    // GGX visible normal distribution for evaluating the PDF\n    float lambda_V = G1_Smith_lambda(alpha_x, alpha_y, local_view_direction);\n    float G1V = 
1.0f / (1.0f + lambda_V);\n    float Dvisible = GGX_anisotropic_vndf(D, G1V, local_view_direction, local_halfway_vector);\n\n    // Maxing to GGX_DOT_PRODUCTS_CLAMP here to avoid zeros and numerical imprecisions\n    // TODO note that we shouldn't need abs() here because we cannot have the view direction or to light direction below the surface\n    float NoV = hippt::max(GGX_DOT_PRODUCTS_CLAMP, hippt::abs(local_view_direction.z));\n    float NoL = hippt::max(GGX_DOT_PRODUCTS_CLAMP, hippt::abs(local_to_light_direction.z));\n\n    // Because we're exactly sampling the visible normals distribution function,\n    // that's exactly our PDF.\n    // \n    // Additionally, because we need to take into account the reflection operator\n    // that we're going to apply to get our final 'to light direction' and so the\n    // jacobian determinant of that reflection operator is the (4.0f * NoV) in the\n    // denominator\n    out_pdf = Dvisible / (4.0f * hippt::dot(local_view_direction, local_halfway_vector));\n    if (out_pdf == 0.0f)\n        return ColorRGB32F(0.0f);\n    else\n    {\n        float lambda_L = G1_Smith_lambda(alpha_x, alpha_y, local_to_light_direction);\n\n        if (render_data.bsdfs_data.GGX_masking_shadowing == GGXMaskingShadowingFlavor::HeightUncorrelated)\n        {\n            float G1L = 1.0f / (1.0f + lambda_L);\n            float G2 = G1V * G1L;\n\n            return F * D * G2 / (4.0f * NoL * NoV);\n        }\n        else // Default to GGXMaskingShadowingFlavor::HeightCorrelated\n        {\n            float G2HeightCorrelated = 1.0f / (1.0f + lambda_V + lambda_L);\n\n            return F * D * G2HeightCorrelated / (4.0f * NoL * NoV);\n        }\n    }\n}\n\n/**\n * 'incident_light_direction_is_from_GGX_sample' should be true if the 'local_to_light_direction' given comes from\n * sampling the microfacet distribution that is being evaluated by this function call\n *\n * false otherwise(if 'local_to_light_direction' comes from light sampling NEE, 
or sampling another lobe of the BSDF, ...).\n * This parameter only matters if the BRDF is perfectly smooth: roughness < MaterialConstants::ROUGHNESS_CLAMP\n */\ntemplate <>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F torrance_sparrow_GGX_eval_reflect<1>(const HIPRTRenderData& render_data, float material_roughness, float material_anisotropy, bool material_do_energy_compensation, const ColorRGB32F& F, \n                                                                                const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector, \n                                                                                float& out_pdf, MaterialUtils::SpecularDeltaReflectionSampled incident_light_direction_is_from_GGX_sample,\n                                                                                int current_bounce)\n{\n    ColorRGB32F ms_compensation_term = get_GGX_energy_compensation_conductors(render_data, F, material_roughness, material_do_energy_compensation, local_view_direction, current_bounce);\n    ColorRGB32F single_scattering = torrance_sparrow_GGX_eval_reflect<0>(render_data, material_roughness, material_anisotropy, false, F, \n        local_view_direction, local_to_light_direction, local_halfway_vector, \n        out_pdf, incident_light_direction_is_from_GGX_sample,\n        current_bounce);\n\n    return single_scattering * ms_compensation_term;\n}\n\n/**\n * Returns the PDF of the Torrance Sparrow BRDF 'FDG / 4.NoL.NoV' with the\n * GGX as the microfacet distribution\n *\n * 'incident_light_direction_is_from_GGX_sample' should be true if the 'local_to_light_direction' given comes from\n * sampling the microfacet distribution that is being evaluated by this function call\n *\n * false otherwise (if 'local_to_light_direction' comes from light sampling NEE, or sampling another lobe of the BSDF, ...).\n * This parameter only matters if the BRDF is perfectly smooth: roughness < 
MaterialConstants::ROUGHNESS_CLAMP\n *\n * Reference: [Sampling the GGX Distribution of Visible Normals, Heitz, 2018]\n * Equation 15\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float torrance_sparrow_GGX_pdf_reflect(const HIPRTRenderData& render_data, float material_roughness, float material_anisotropy,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector,\n    MaterialUtils::SpecularDeltaReflectionSampled incident_light_direction_is_from_GGX_sample)\n{\n    if (MaterialUtils::is_perfectly_smooth(material_roughness) && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE)\n    {\n        // Fast path for perfectly specular BRDF\n        if (incident_light_direction_is_from_GGX_sample == MaterialUtils::SpecularDeltaReflectionSampled::SPECULAR_PEAK_NOT_SAMPLED)\n            // For a perfectly smooth GGX distribution (a delta distribution), anything other than a\n            // perfectly sampled reflection direction is going to yield 0 contribution\n            return 0.0f;\n        else\n        {\n            if (hippt::dot(reflect_ray(local_view_direction, make_float3(0.0f, 0.0f, 1.0f)), local_to_light_direction) < MaterialConstants::DELTA_DISTRIBUTION_ALIGNEMENT_THRESHOLD)\n            {\n                // Just an additional check that we indeed have the incident light\n                // direction aligned with the perfect reflection direction\n                //\n                // This additional check is mainly useful for ReSTIR where we need\n                // to evaluate the BRDF with a sample that may have been sampled from\n                // a delta distribution at a neighbor (so it checks all the boxes for the shortcut\n                // and we could just quickly return MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE\n                // but because that sample wasn't sampled at the current pixel, there\n                // is a good chance that it doesn't actually align 
with the perfect\n                // reflection direction = it doesn't align with the specular peak = 0 contribution\n\n                return 0.0f;\n            }\n\n            return MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE;\n        }\n    }\n\n    if (local_to_light_direction.z < 0.0f)\n        // A direction that is below the surface is invalid for a microfacet ** BRDF **\n        return 0.0f;\n\n    float pdf = 0.0f;\n\n    float alpha_x;\n    float alpha_y;\n    MaterialUtils::get_alphas(material_roughness, material_anisotropy, alpha_x, alpha_y);\n\n    // GGX normal distribution\n    float D = GGX_anisotropic(alpha_x, alpha_y, local_halfway_vector);\n\n    // GGX visible normal distribution for evaluating the PDF\n    float lambda_V = G1_Smith_lambda(alpha_x, alpha_y, local_view_direction);\n    float G1V = 1.0f / (1.0f + lambda_V);\n    float Dvisible = GGX_anisotropic_vndf(D, G1V, local_view_direction, local_halfway_vector);\n\n    // Because we're exactly sampling the visible normals distribution function,\n    // that's exactly our PDF.\n    // \n    // Additionally, because we need to take into account the reflection operator\n    // that we're going to apply to get our final 'to light direction' and so the\n    // jacobian determinant of that reflection operator is the (4.0f * NoV) in the\n    // denominator\n    return Dvisible / (4.0f * hippt::dot(local_view_direction, local_halfway_vector));\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F torrance_sparrow_GGX_eval_refract(const DeviceUnpackedEffectiveMaterial& material, float roughness, float relative_eta, ColorRGB32F fresnel_reflectance,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector,\n    float& out_pdf, BSDFIncidentLightInfo incident_light_info)\n{\n    float NoL = local_to_light_direction.z;\n    float NoV = local_view_direction.z;\n    float HoL = hippt::dot(local_to_light_direction, local_halfway_vector);\n    
float HoV = hippt::dot(local_view_direction, local_halfway_vector);\n\n    ColorRGB32F color;\n    if (MaterialUtils::is_perfectly_smooth(roughness) && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE)\n    {\n        // Fast path for specular glass\n        bool incident_direction_is_perfect_refraction = hippt::dot(refract_ray(local_view_direction, make_float3(0.0f, 0.0f, 1.0f), relative_eta), local_to_light_direction) > MaterialConstants::DELTA_DISTRIBUTION_ALIGNEMENT_THRESHOLD;\n        if (incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE && incident_direction_is_perfect_refraction)\n        {\n            // When the glass is perfectly smooth i.e. delta distribution, our only hope is to sample\n            // directly from the glass lobe. If we didn't sample from the glass lobe, this is going to be 0\n            // contribution\n\n            // Just some high value because this is a delta distribution\n            // And also, take fresnel into account\n            color = ColorRGB32F(MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE) * (ColorRGB32F(1.0f) - fresnel_reflectance) * material.base_color;\n            color /= hippt::abs(NoL);\n            out_pdf = MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE;\n        }\n        else\n        {\n            color = ColorRGB32F(0.0f);\n            out_pdf = 0.0f;\n        }\n    }\n    else\n    {\n        float dot_prod = HoL + HoV / relative_eta;\n        float dot_prod2 = dot_prod * dot_prod;\n        float denom = dot_prod2 * NoL * NoV;\n\n        float alpha_x;\n        float alpha_y;\n        MaterialUtils::get_alphas(roughness, material.anisotropy, alpha_x, alpha_y);\n\n        float D = GGX_anisotropic(alpha_x, alpha_y, local_halfway_vector);\n        float G1_V = G1_Smith(alpha_x, alpha_y, local_view_direction);\n        float G1_L = G1_Smith(alpha_x, alpha_y, local_to_light_direction);\n        float G2 = G1_V * G1_L;\n\n        
float dwm_dwi = hippt::abs(HoL) / dot_prod2;\n        float D_pdf = G1_V / hippt::abs(NoV) * D * hippt::abs(HoV);\n        out_pdf = dwm_dwi * D_pdf;\n\n        // We added a check a few lines above to \"avoid dividing by 0 later on\". This is where.\n        // When NoL is 0, denom is 0 too and we're dividing by 0. \n        // The PDF of this case is as low as 1.0e-9 (light direction sampled perpendicularly to the normal)\n        // so this is an extremely rare case.\n        // The PDF being non-zero, we could actually compute it, it's valid but not with floats :D\n        color = material.base_color * D * (ColorRGB32F(1.0f) - fresnel_reflectance) * G2 * hippt::abs(HoL * HoV / denom);\n    }\n\n    // Account for non-symmetric scattering when refracting\n    // Reference: https://www.pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF#Non-SymmetricScatteringandRefraction\n    color /= hippt::square(relative_eta);\n\n    return color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float torrance_sparrow_GGX_pdf_refract(const DeviceUnpackedEffectiveMaterial& material, float roughness, float relative_eta,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector,\n    BSDFIncidentLightInfo incident_light_info)\n{\n    float NoL = local_to_light_direction.z;\n    float NoV = local_view_direction.z;\n    float HoL = hippt::dot(local_to_light_direction, local_halfway_vector);\n    float HoV = hippt::dot(local_view_direction, local_halfway_vector);\n\n    ColorRGB32F color;\n    if (MaterialUtils::is_perfectly_smooth(roughness) && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE)\n    {\n        // Fast path for specular glass\n        bool incident_direction_is_perfect_refraction = hippt::dot(refract_ray(local_view_direction, make_float3(0.0f, 0.0f, 1.0f), relative_eta), local_to_light_direction) > MaterialConstants::DELTA_DISTRIBUTION_ALIGNEMENT_THRESHOLD;\n        if (incident_light_info == 
BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE && incident_direction_is_perfect_refraction)\n            return MaterialConstants::DELTA_DISTRIBUTION_HIGH_VALUE;\n        else\n            return 0.0f;\n    }\n    else\n    {\n        float dot_prod = HoL + HoV / relative_eta;\n        float dot_prod2 = dot_prod * dot_prod;\n        float denom = dot_prod2 * NoL * NoV;\n\n        float alpha_x;\n        float alpha_y;\n        MaterialUtils::get_alphas(roughness, material.anisotropy, alpha_x, alpha_y);\n\n        float D = GGX_anisotropic(alpha_x, alpha_y, local_halfway_vector);\n        float G1_V = G1_Smith(alpha_x, alpha_y, local_view_direction);\n        float G1_L = G1_Smith(alpha_x, alpha_y, local_to_light_direction);\n        float G2 = G1_V * G1_L;\n\n        float dwm_dwi = hippt::abs(HoL) / dot_prod2;\n        float D_pdf = G1_V / hippt::abs(NoV) * D * hippt::abs(HoV);\n \n        return dwm_dwi * D_pdf;\n    }\n}\n\n/**\n * Reference: [Sampling the GGX Distribution of Visible Normals, Unity: Heitz ; 2018]\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 GGX_VNDF_sample(const float3 local_view_direction, float alpha_x, float alpha_y, Xorshift32Generator& random_number_generator)\n{\n    float r1 = random_number_generator();\n    float r2 = random_number_generator();\n\n    // Stretching the ellipsoid to the hemisphere configuration\n    float3 Vh = hippt::normalize(float3{ alpha_x * local_view_direction.x, alpha_y * local_view_direction.y, local_view_direction.z });\n\n    // Orthonormal basis construction\n    float lensq = Vh.x * Vh.x + Vh.y * Vh.y;\n    float3 T1 = lensq > 0.0f ? 
float3{ -Vh.y, Vh.x, 0 } / sqrt(lensq) : float3{ 1.0f, 0.0f, 0.0f };\n    float3 T2 = hippt::cross(Vh, T1);\n\n    // Parametrization of the projected area of the hemisphere\n    float r = sqrt(r1);\n    float phi = M_TWO_PI * r2;\n    float t1 = r * cos(phi);\n    float t2 = r * sin(phi);\n    float s = 0.5f * (1.0f + Vh.z);\n    t2 = (1.0f - s) * sqrt(1.0f - t1 * t1) + s * t2;\n\n    // Sampling the hemisphere\n    float3 Nh = t1 * T1 + t2 * T2 + sqrt(hippt::max(0.0f, 1.0f - t1 * t1 - t2 * t2)) * Vh;\n\n    // Un-stretching back to our ellipsoid\n    return hippt::normalize(float3{ alpha_x * Nh.x, alpha_y * Nh.y, hippt::max(0.0f, Nh.z) });\n}\n\n/**\n * Sample the distribution anisotropic GGX of visible normals using\n * the spherical caps formulation which is slightly faster than the traditional\n * VNDF sampling by Heitz 2018.\n *\n * Reference: [Sampling Visible GGX Normals with Spherical Caps, Dupuy, Benyoub, 2023]\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 GGX_VNDF_spherical_caps_sample(const float3 local_view_direction, float alpha_x, float alpha_y, Xorshift32Generator& random_number_generator)\n{\n    float r1 = random_number_generator();\n    float r2 = random_number_generator();\n\n    // Stretching the ellipsoid to the hemisphere configuration\n    float3 Vh = hippt::normalize(make_float3(alpha_x * local_view_direction.x, alpha_y * local_view_direction.y, local_view_direction.z));\n\n    // Sample a spherical cap in (-wi.z, 1]\n    float phi = M_TWO_PI * r1;\n    float z = (1.0f - r2) * (1.0f + Vh.z) - Vh.z;\n    float sinTheta = sqrtf(hippt::clamp(0.0f, 1.0f, 1.0f - z * z));\n    float x = sinTheta * cos(phi);\n    float y = sinTheta * sin(phi);\n    float3 c = make_float3(x, y, z);\n\n    // Compute microfacet normal\n    float3 Nh = c + Vh;\n\n    // Un-stretching back to our ellipsoid\n    return hippt::normalize(make_float3(alpha_x * Nh.x, alpha_y * Nh.y, Nh.z));\n}\n\n/**\n * Samples a microfacet normal from the distribution of visible normals 
of\n * the GGX normal function distribution\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 GGX_anisotropic_sample_microfacet(const float3& local_view_direction, float alpha_x, float alpha_y, Xorshift32Generator& random_number_generator)\n{\n    if (alpha_x <= MaterialConstants::ROUGHNESS_CLAMP && alpha_y <= MaterialConstants::ROUGHNESS_CLAMP)\n        // For delta GGX distribution, the sampled normal is always the same as the surface normal\n        // (so (0, 0, 1) in local space\n        //\n        // This is basically a small optimization to avoid to whole sampling routine\n        return make_float3(0.0f, 0.0f, 1.0f);\n\n#if PrincipledBSDFAnisotropicGGXSampleFunction == GGX_VNDF_SAMPLING\n    return GGX_VNDF_sample(local_view_direction, alpha_x, alpha_y, random_number_generator);\n#elif PrincipledBSDFAnisotropicGGXSampleFunction == GGX_VNDF_SPHERICAL_CAPS\n    return GGX_VNDF_spherical_caps_sample(local_view_direction, alpha_x, alpha_y, random_number_generator);\n#elif PrincipledBSDFAnisotropicGGXSampleFunction == GGX_VNDF_BOUNDED\n#else\n#endif\n}\n\n/*\n * Samples a microfacet normal from the distribution of visible normals of\n * the GGX normal function distribution and reflects the given view direction\n * about that microfacet normal to produce a 'to_light_direction' in local\n * shading space that is then returned by that function\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 microfacet_GGX_sample_reflection(float roughness, float anisotropy, const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    // The view direction can sometimes be below the shading normal hemisphere\n    // because of normal mapping / smooth normals\n    int below_normal = (local_view_direction.z < 0) ? 
-1 : 1;\n    float alpha_x, alpha_y;\n    MaterialUtils::get_alphas(roughness, anisotropy, alpha_x, alpha_y);\n\n    if (below_normal == -1)\n        below_normal *= 1.0f;\n\n    float3 microfacet_normal = GGX_anisotropic_sample_microfacet(local_view_direction * below_normal, alpha_x, alpha_y, random_number_generator);\n    float3 sampled_direction = reflect_ray(local_view_direction, microfacet_normal * below_normal);\n\n    // Should already be normalized but float imprecisions...\n    return hippt::normalize(sampled_direction);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/MicrofacetEnergyCompensation.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_BSDF_MICROFACET_ENERGY_COMPENSATION_H\n#define DEVICE_BSDF_MICROFACET_ENERGY_COMPENSATION_H\n\n#include \"Device/includes/Fresnel.h\"\n#include \"Device/includes/Texture.h\"\n\n#include \"Device/includes/SanityCheck.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n // To be able to access GPUBakerConstants::GGX_DIRECTIONAL_ALBEDO_TEXTURE_SIZE && GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\n /**\n  * References:\n  * [1] [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n  * [2] [Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n  * [3] [Dassault Enterprise PBR 2025 Specification]\n  * [4] [Google - Physically Based Rendering in Filament]\n  * [5] [MaterialX codebase on Github]\n  * [6] [Blender's Cycles codebase on Github]\n  */\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F get_GGX_energy_compensation_conductors(const HIPRTRenderData& render_data, const ColorRGB32F& F0, float material_roughness, bool material_do_energy_compensation, const float3& local_view_direction, int current_bounce)\n{\n\tbool max_bounce_reached = current_bounce > render_data.bsdfs_data.metal_energy_compensation_max_bounce && render_data.bsdfs_data.metal_energy_compensation_max_bounce > -1;\n\tbool smooth_enough = material_roughness <= render_data.bsdfs_data.energy_compensation_roughness_threshold;\n\tbool invalid_view_direction = local_view_direction.z < 0.0f;\n\tif (!material_do_energy_compensation || smooth_enough || max_bounce_reached || invalid_view_direction)\n\t\treturn ColorRGB32F(1.0f);\n\n    const void* GGX_directional_albedo_texture_pointer = nullptr;\n#ifdef __KERNELCC__\n    GGX_directional_albedo_texture_pointer = &render_data.bsdfs_data.GGX_conductor_directional_albedo;\n#else\n    
GGX_directional_albedo_texture_pointer = render_data.bsdfs_data.GGX_conductor_directional_albedo;\n#endif\n\n    // Reading the precomputed directional albedo from the texture\n\tfloat2 uv = make_float2(hippt::max(0.0f, local_view_direction.z), material_roughness);\n\n\t// Flipping the Y manually (and that's why we pass 'false' in the sample call that follow)\n\t// because that GGX energy compensation texture is created with a clamp address mode, not wrap\n\t// and we have to do the Y-flipping manually when not sampling in wrap mode\n\tuv.y = 1.0f - uv.y;\n\tfloat Ess = sample_texture_rgb_32bits(GGX_directional_albedo_texture_pointer, 0, /* is_srgb */ false, uv, /* flip UV-Y */ false).r;\n\n    // Computing kms, [Practical multiple scattering compensation for microfacet models, Turquin, 2019], Eq. 10\n    float kms = (1.0f - Ess) / Ess;\n\n#if PrincipledBSDFDoMetallicFresnelEnergyCompensation == KERNEL_OPTION_TRUE\n    // [Practical multiple scattering compensation for microfacet models, Turquin, 2019], Eq. 15\n    ColorRGB32F fresnel_compensation_term = F0;\n#else\n    // 1.0f F so that the fresnel compensation has no effect\n    ColorRGB32F fresnel_compensation_term = ColorRGB32F(1.0f);\n#endif\n    // Computing the compensation term and multiplying by the single scattering non-energy conserving base GGX BRDF,\n    // Eq. 
9\n    return ColorRGB32F(1.0f) + kms * fresnel_compensation_term;\n}\n\n/**\n * References:\n * [1] [Practical multiple scattering compensation for microfacet models, Turquin, 2019] [Main implementation]\n * [2] [Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n * [3] [Dassault Enterprise PBR 2025 Specification]\n * [4] [Google - Physically Based Rendering in Filament]\n * [5] [MaterialX codebase on Github]\n * [6] [Blender's Cycles codebase on Github]\n *\n * The energy compensation LUT for GGX Glass materials is computed by remapping cos_theta\n * with cos_theta^2.5\n *\n * However cos_theta^2.5 still results in energy gains at grazing angles so we're going to bias\n * the exponent used for fetching in the table here.\n *\n * This means that we store in the LUT during the precomputation but we're going to fetch\n * from the LUT with an exponent higher than 2.5f to try and force-remove energy gains\n *\n * The \"ideal\" exponent depends primarily on roughness so I've fine-tuned some parameters\n * here to try and get the best white furnace tests\n *\n *\n * --------------------\n * If you're reading this code for a reference implementation, read what follows:\n * In the end, what we're doing here is to fix the unwanted energy gains that we have with\n * the base implementation as proposed in\n * [Practical multiple scattering compensation for microfacet models, Turquin, 2019].\n *\n * I don't think that these energy gains are supposed to happen, they are not mentioned\n * anywhere in the papers. And the papers use 32x32x32 tables. We use 256x16x192. And\n * we still have issues. I'm led to believe that the issue is elsewhere in the codebase\n * but oh well... 
I can't find where this is coming from so we're fixing the broken code\n * instead of fixing the root of the issue which probably isn't what you should do if you're\n * reading this\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_glass_energy_compensation_get_correction_exponent(float roughness, float relative_eta)\n{\n    if (hippt::is_zero(roughness) || hippt::abs(1.0f - relative_eta) < 1.0e-3f)\n        // No correction for these, returning the original 2.5f that is used in the LUT\n        return 2.5f;\n\n\tfloat lower_relative_eta_bound = 1.01f;\n\tfloat lower_correction = 2.5f;\n\tif (relative_eta > 1.01f && relative_eta <= 1.02f)\n\t{\n\t\tlower_relative_eta_bound = 1.01f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.4f, 2.45f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.45f, 2.4665f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.4665f, 2.52f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.52f, 2.55f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = 2.55f;\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(2.55f, 2.585f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.585f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.02f && relative_eta <= 1.03f)\n\t{\n\t\tlower_relative_eta_bound = 1.02f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 
0.1f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.51f, 2.54f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.54f, 2.565f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(2.565f, 2.57f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(2.57f, 2.59f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.59f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.03f && relative_eta <= 1.1f)\n\t{\n\t\tlower_relative_eta_bound = 1.03f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.51f, 2.544f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.544f, 2.565f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 
0.8f)\n\t\t\tlower_correction = hippt::lerp(2.565f, 2.58f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(2.58f, 2.6f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.1f && relative_eta <= 1.2f)\n\t{\n\t\tlower_relative_eta_bound = 1.1f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.54f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.54f, 2.575f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.575f, 2.61f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(2.61f, 2.63f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(2.63f, 2.6f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.2f && relative_eta <= 1.4f)\n\t{\n\t\tlower_relative_eta_bound = 1.2f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 
0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.55f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.55f, 2.65f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.65f, 2.675f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(2.675f, 2.7f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(2.7f, 2.675f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.675f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.4f && relative_eta <= 1.5f)\n\t{\n\t\tlower_relative_eta_bound = 1.4f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.7f, 2.875f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.875f, 2.925f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(2.925f, 2.95f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 
0.9f)\n\t\t\tlower_correction = hippt::lerp(2.95f, 2.8f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(2.8f, 2.55f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 1.5f && relative_eta <= 2.0f)\n\t{\n\t\tlower_relative_eta_bound = 1.5f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 1.6f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(1.6f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.7f, 2.95f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(2.95f, 3.1f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = 3.1f;\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(3.1f, 3.05f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(3.05f, 2.57f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 2.0f && relative_eta <= 2.4f)\n\t{\n\t\tlower_relative_eta_bound = 2.0f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(1.5f, 2.2f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.2f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = 
hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 2.75f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(2.75f, 3.5f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(3.5f, 4.85f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(4.85f, 6.0f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(6.0f, 7.0f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(7.0f, 2.57f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta > 2.4f && relative_eta <= 3.0f)\n\t{\n\t\tlower_relative_eta_bound = 2.4f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\tlower_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\tlower_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\tlower_correction = hippt::lerp(1.5f, 2.0f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\tlower_correction = hippt::lerp(2.0f, 2.44f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\tlower_correction = hippt::lerp(2.44f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\tlower_correction = hippt::lerp(2.475f, 3.0f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\tlower_correction = hippt::lerp(3.0f, 3.8f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\tlower_correction = hippt::lerp(3.8f, 7.0f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\tlower_correction = hippt::lerp(7.0f, 10.0f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\tlower_correction = hippt::lerp(10.0f, 12.0f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\tlower_correction = hippt::lerp(12.0f, 3.9f, (roughness - 
0.9f) / 0.1f);\n\t}\n\n\tfloat higher_relative_eta_bound = 1.01f;\n\tfloat higher_correction = 2.5f;\n\tif (relative_eta <= 1.01f)\n\t{\n\t\thigher_relative_eta_bound = 1.01f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.4f, 2.45f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.45f, 2.4665f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.4665f, 2.52f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.52f, 2.55f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = 2.55f;\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.55f, 2.585f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.585f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.02f)\n\t{\n\t\thigher_relative_eta_bound = 1.02f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 
0.6f)\n\t\t\thigher_correction = hippt::lerp(2.51f, 2.54f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.54f, 2.565f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(2.565f, 2.57f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.57f, 2.59f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.59f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.03f)\n\t{\n\t\thigher_relative_eta_bound = 1.03f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.4f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.4f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.51f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.51f, 2.544f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.544f, 2.565f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(2.565f, 2.58f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.58f, 2.6f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.1f)\n\t{\n\t\thigher_relative_eta_bound = 1.1f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = 
2.5f;\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.54f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.54f, 2.575f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.575f, 2.61f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(2.61f, 2.63f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.63f, 2.6f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.6f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.2f)\n\t{\n\t\thigher_relative_eta_bound = 1.2f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.55f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.55f, 2.65f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.65f, 2.675f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 
0.8f)\n\t\t\thigher_correction = hippt::lerp(2.675f, 2.7f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.7f, 2.675f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.675f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.4f)\n\t{\n\t\thigher_relative_eta_bound = 1.4f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.8f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.8f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.7f, 2.875f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.875f, 2.925f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(2.925f, 2.95f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(2.95f, 2.8f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(2.8f, 2.55f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 1.5f)\n\t{\n\t\thigher_relative_eta_bound = 1.5f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.6f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.6f, 2.3f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 
0.3f)\n\t\t\thigher_correction = hippt::lerp(2.3f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.7f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.7f, 2.95f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(2.95f, 3.1f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = 3.1f;\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(3.1f, 3.05f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(3.05f, 2.57f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 2.0f)\n\t{\n\t\thigher_relative_eta_bound = 2.0f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.5f, 2.2f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.2f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.75f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.75f, 3.5f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(3.5f, 4.85f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(4.85f, 6.0f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(6.0f, 7.0f, (roughness - 0.8f) / 
0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(7.0f, 2.57f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse if (relative_eta <= 2.4f)\n\t{\n\t\thigher_relative_eta_bound = 2.4f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.5f, 2.0f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(2.0f, 2.44f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.44f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 3.0f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(3.0f, 3.8f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(3.8f, 7.0f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(7.0f, 10.0f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(10.0f, 12.0f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(12.0f, 3.9f, (roughness - 0.9f) / 0.1f);\n\t}\n\telse\n\t{\n\t\thigher_relative_eta_bound = 3.0f;\n\n\t\tif (roughness <= 0.0f)\n\t\t\thigher_correction = 2.5f;\n\t\telse if (roughness <= 0.1f)\n\t\t\thigher_correction = hippt::lerp(2.5f, 1.5f, (roughness - 0.0f) / 0.1f);\n\t\telse if (roughness <= 0.2f)\n\t\t\thigher_correction = hippt::lerp(1.5f, 1.7f, (roughness - 0.1f) / 0.1f);\n\t\telse if (roughness <= 0.3f)\n\t\t\thigher_correction = hippt::lerp(1.7f, 2.38f, (roughness - 0.2f) / 0.1f);\n\t\telse if (roughness <= 0.4f)\n\t\t\thigher_correction = hippt::lerp(2.38f, 2.475f, (roughness - 0.3f) / 0.1f);\n\t\telse if (roughness <= 
0.5f)\n\t\t\thigher_correction = hippt::lerp(2.475f, 2.9f, (roughness - 0.4f) / 0.1f);\n\t\telse if (roughness <= 0.6f)\n\t\t\thigher_correction = hippt::lerp(2.9f, 3.8f, (roughness - 0.5f) / 0.1f);\n\t\telse if (roughness <= 0.7f)\n\t\t\thigher_correction = hippt::lerp(3.8f, 7.5f, (roughness - 0.6f) / 0.1f);\n\t\telse if (roughness <= 0.8f)\n\t\t\thigher_correction = hippt::lerp(7.5f, 12.0f, (roughness - 0.7f) / 0.1f);\n\t\telse if (roughness <= 0.9f)\n\t\t\thigher_correction = hippt::lerp(12.0f, 13.75f, (roughness - 0.8f) / 0.1f);\n\t\telse if (roughness <= 1.0f)\n\t\t\thigher_correction = hippt::lerp(13.75f, 2.5f, (roughness - 0.9f) / 0.1f);\n\t}\n\n\tif (higher_relative_eta_bound == lower_relative_eta_bound)\n\t\t// Arbitrarily returning the lower correction \n\t\treturn lower_correction;\n\n\treturn hippt::lerp(lower_correction, higher_correction, (relative_eta - lower_relative_eta_bound) / (higher_relative_eta_bound - lower_relative_eta_bound));\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_GGX_energy_compensation_dielectrics(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, float custom_roughness, bool inside_object, float eta_t, float eta_i, float relative_eta, float NoV, int current_bounce)\n{\n\tbool smooth_enough = custom_roughness <= render_data.bsdfs_data.energy_compensation_roughness_threshold;\n\tbool max_bounce_reached = current_bounce > render_data.bsdfs_data.glass_energy_compensation_max_bounce && render_data.bsdfs_data.glass_energy_compensation_max_bounce > -1;\n\tif (!material.do_glass_energy_compensation || smooth_enough || max_bounce_reached)\n\t\treturn 1.0f;\n\n\tfloat compensation_term = 1.0f;\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoGlassEnergyCompensation == KERNEL_OPTION_TRUE\n\t// Not doing energy compensation if the thin-film is fully present\n\t// See the // TODO FIX THIS HORROR below\n\t//\n\t// Also not doing compensation if we already have full 
compensation on the material\n\t// because the energy compensation of the glass lobe here is then redundant\n\tif (material.thin_film < 1.0f)\n\t{\n\t\tfloat relative_eta_for_correction = inside_object ? 1.0f / relative_eta : relative_eta;\n\t\tfloat exponent_correction = 2.5f;\n\t\tif (!material.thin_walled)\n\t\t\texponent_correction = GGX_glass_energy_compensation_get_correction_exponent(custom_roughness, relative_eta_for_correction);\n\n\t\t// We're storing cos_theta_o^2.5 in the LUT so we're retrieving it with pow(1.0f / 2.5f) i.e.\n\t\t// sqrt 2.5\n\t\t//\n\t\t// We're using a \"correction exponent\" to forcefully get rid of energy gains at grazing angles due\n\t\t// to float precision issues: storing in the LUT with cos_theta^2.5 but fetching with pow(1.0f / 2.6f)\n\t\t// for example (instead of fetching with pow(1.0f / 2.5f)) darkens the overall appearance and helps remove\n\t\t// energy gains\n\t\tfloat view_direction_tex_fetch = powf(hippt::max(1.0e-3f, NoV), 1.0f / exponent_correction);\n\n\t\tfloat F0 = F0_from_eta(eta_t, eta_i);\n\t\t// sqrt(sqrt()) of F0 here because we're storing F0^4 in the LUT\n\t\tfloat F0_remapped = sqrt(sqrt(F0));\n\n\t\tfloat3 uvw = make_float3(view_direction_tex_fetch, custom_roughness, F0_remapped);\n\t\tif (material.thin_walled)\n\t\t{\n\t\t\tvoid* texture = render_data.bsdfs_data.GGX_thin_glass_directional_albedo;\n\t\t\tint3 dims = make_int3(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O, GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS, GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n\n\t\t\tcompensation_term = sample_texture_3D_rgb_32bits(texture, dims, uvw, render_data.bsdfs_data.use_hardware_tex_interpolation).r;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tvoid* texture = inside_object ? 
render_data.bsdfs_data.GGX_glass_directional_albedo_inverse : render_data.bsdfs_data.GGX_glass_directional_albedo;\n\t\t\tint3 dims = make_int3(GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O, GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS, GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n\n\t\t\tcompensation_term = sample_texture_3D_rgb_32bits(texture, dims, uvw, render_data.bsdfs_data.use_hardware_tex_interpolation).r;\n\t\t}\n\n\t\t// TODO FIX THIS HORROR\n\t\t// This is here because directional albedo for the glass BSDF is tabulated with the standard non-colored Fresnel\n\t\t// This means that the precomputed table is incompatible with the thin-film interference fresnel\n\t\t// \n\t\t// And as a matter of fact, using the energy compensation term (precomputed for the traditional fresnel)\n\t\t// with thin-film interference Fresnel results in noticeable energy gains at grazing angles at high roughnesses\n\t\t//\n\t\t// Blender Cycles doesn't have that issue but I don't understand yet how they avoid it.\n\t\t//\n\t\t// The quick and disgusting solution here is just to disable energy compensation as the thin-film\n\t\t// weight gets stronger. 
Energy compensation is fully disabled when the thin-film weight is 1.0f\n\t\t//\n\t\t// Because the error is stronger at high roughnesses than at low roughnesses, we can include the roughness\n\t\t// in the lerp such that we use less and less the energy compensation term as the roughness increases\n\t\tcompensation_term = hippt::lerp(compensation_term, 1.0f, material.thin_film * custom_roughness);\n\t}\n#endif\n\n\treturn compensation_term;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_GGX_energy_compensation_dielectrics(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, bool inside_object, float eta_t, float eta_i, float relative_eta, float NoV, int current_bounce)\n{\n\treturn get_GGX_energy_compensation_dielectrics(render_data, material, material.roughness, inside_object, eta_t, eta_i, relative_eta, NoV, current_bounce);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/MicrofacetRegularization.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDE_MICROFACET_REGULARIZATION_H\n#define DEVICE_INCLUDE_MICROFACET_REGULARIZATION_H\n\n#include \"HostDeviceCommon/KernelOptions/PrincipledBSDFKernelOptions.h\"\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/MicrofacetRegularizationSettings.h\"\n\nstruct MicrofacetRegularization\n{\n\tenum class RegularizationMode : unsigned char\n\t{\n\t\tNO_REGULARIZATION = 0,\n\t\tREGULARIZATION_CLASSIC = 1, // Should be used when the regularized BSDF PDF isn't going to be used in a MIS weight\n\t\tREGULARIZATION_MIS = 2, // Should be used when the regularized BSDF PDF ** is ** going to be used in a MIS weight or if this is for evaluating a BSDF whose sample comes from MIS sampling\n\t};\n\n\tHIPRT_HOST_DEVICE static float regularize_reflection(const MicrofacetRegularizationSettings& regularization_settings, RegularizationMode regularization_mode, float initial_roughness, float accumulated_path_roughness, int sample_number)\n\t{\n#if PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_FALSE\n\t\treturn initial_roughness;\n#endif\n\n\t\tif (regularization_mode == RegularizationMode::NO_REGULARIZATION)\n\t\t\treturn initial_roughness;\n\n\t\tfloat consistent_tau = MicrofacetRegularization::consistent_tau(regularization_settings.tau_0, sample_number);\n\t\t// Note that the diffusion heuristic that we're using here is not the one proposed in the paper\n\t\t// because the one of the paper requires the mean curvature of the surface and this requires additional\n\t\t// per vertex data to be computed... 
Sounds a bit heavy just for path regularization\n\t\t//\n\t\t// So instead, we're just using the maximum roughness found on the path so far (which is\n\t\t// 'accumulated_path_roughness') to decide whether or not we should use a strong regularization\n\t\t// or not.\n\t\t//\n\t\t// Caustics only happen on diffuse surfaces (roughness 1). So for such a surface, tau should be\n\t\t// unchanged i.e., we use the full regularization.\n\t\t// \n\t\t// But for smooth surfaces (mirrors, clear glass), we shouldn't regularize anything to keep the sharpness\n\t\t// of the glossy reflections.\n\t\t//\n\t\t// By dividing by a roughness close to 0, tau skyrockets and regularization is essentially disabled \n\t\tfloat path_diffusion_tau = consistent_tau / hippt::max(hippt::square(accumulated_path_roughness), 1.0e-8f);\n\n#if PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic == KERNEL_OPTION_TRUE\n\t\tfloat final_tau = path_diffusion_tau;\n#else\n\t\tfloat final_tau = consistent_tau;\n#endif\n\n\t\tfloat regularized_roughness = sqrtf(sqrtf(1.0f / (final_tau * M_PI)));\n\n\t\treturn hippt::max(regularization_settings.min_roughness, hippt::max(initial_roughness, regularized_roughness));\n\t}\n\n\tHIPRT_HOST_DEVICE static float regularize_refraction(const MicrofacetRegularizationSettings& regularization_settings, RegularizationMode regularization_mode, float initial_roughness, float accumulated_path_roughness, float eta_i, float eta_t, int sample_number)\n\t{\n#if PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_FALSE\n\t\treturn initial_roughness;\n#endif\n\n\t\tif (regularization_mode == RegularizationMode::NO_REGULARIZATION)\n\t\t\treturn initial_roughness;\n\n\t\tfloat consistent_tau = MicrofacetRegularization::consistent_tau(regularization_settings.tau_0, sample_number + 1);\n\t\t// Note that the diffusion heuristic that we're using here is not the one proposed in the paper\n\t\t// because the one of the paper requires the mean curvature of the surface and this 
requires additional\n\t\t// per vertex data to be computed... Sounds a bit heavy just for path regularization\n\t\t//\n\t\t// So instead, we're just using the maximum roughness found on the path so far (which is\n\t\t// 'accumulated_path_roughness') to decide whether or not we should use a strong regularization\n\t\t// or not.\n\t\t//\n\t\t// Caustics only happen on diffuse surfaces (roughness 1). So for such a surface, tau should be\n\t\t// unchanged i.e., we use the full regularization.\n\t\t// \n\t\t// But for smooth surfaces (mirrors, clear glass), we shouldn't regularize anything to keep the sharpness\n\t\t// of the glossy reflections.\n\t\t//\n\t\t// By dividing by a roughness close to 0, tau skyrockets and regularization is essentially disabled\n\t\tfloat path_diffusion_tau = consistent_tau / hippt::max(hippt::square(accumulated_path_roughness), 1.0e-8f);\n\n#if PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic == KERNEL_OPTION_TRUE\n\t\tfloat final_tau = path_diffusion_tau;\n#else\n\t\tfloat final_tau = consistent_tau;\n#endif\n\n\t\tfloat regularized_roughness = sqrtf(sqrtf(1.0f / (final_tau * M_PI * hippt::square(eta_i - eta_t) / (4.0f * hippt::square(hippt::max(eta_i, eta_t))))));\n\n\t\treturn hippt::max(regularization_settings.min_roughness, hippt::max(initial_roughness, regularized_roughness));\n\t}\n\n\tHIPRT_HOST_DEVICE static float regularize_mix_reflection_refraction(const MicrofacetRegularizationSettings& regularization_settings, RegularizationMode regularization_mode, float initial_roughness, float accumulated_path_roughness, float eta_i, float eta_t, int sample_number)\n\t{\n#if PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_FALSE\n\t\treturn initial_roughness;\n#endif\n\n\t\tif (regularization_mode == RegularizationMode::NO_REGULARIZATION)\n\t\t\treturn initial_roughness;\n\n\t\tfloat consistent_tau = MicrofacetRegularization::consistent_tau(regularization_settings.tau_0, sample_number);\n\t\t// Note that the diffusion 
heuristic that we're using here is not the one proposed in the paper\n\t\t// because the one of the paper requires the mean curvature of the surface and this requires additional\n\t\t// per vertex data to be computed... Sounds a bit heavy just for path regularization\n\t\t//\n\t\t// So instead, we're just using the maximum roughness found on the path so far (which is\n\t\t// 'accumulated_path_roughness') to decide whether or not we should use a strong regularization\n\t\t// or not.\n\t\t//\n\t\t// Caustics only happen on diffuse surfaces (roughness 1). So for such a surface, tau should be\n\t\t// unchanged i.e., we use the full regularization.\n\t\t// \n\t\t// But for smooth surfaces (mirrors, clear glass), we shouldn't regularize anything to keep the sharpness\n\t\t// of the glossy reflections.\n\t\t//\n\t\t// By dividing by a roughness close to 0, tau skyrockets and regularization is essentially disabled\n\t\tfloat path_diffusion_tau = consistent_tau / hippt::max(hippt::square(accumulated_path_roughness), 1.0e-8f);\n\n#if PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic == KERNEL_OPTION_TRUE\n\t\tfloat final_tau = path_diffusion_tau;\n#else\n\t\tfloat final_tau = consistent_tau;\n#endif\n\n\t\tfloat regularized_roughness_reflection = sqrtf(sqrtf(1.0f / (final_tau * M_PI)));\n\n\t\tif (eta_i == eta_t)\n\t\t\t// Avoiding singularities.\n\t\t\t// \n\t\t\t// The refraction regularized roughness will be degenerate here so we're just using the reflection\n\t\t\t// regularization\n\t\t\treturn regularized_roughness_reflection;\n\n\t\tfloat regularized_roughness_refraction = sqrtf(sqrtf(1.0f / (final_tau * M_PI * hippt::square(eta_i - eta_t) / (4.0f * hippt::square(hippt::max(eta_i, eta_t))))));\n\n\t\t// Mixing both reflection and refraction regularized roughnesses.\n\t\t// Refraction regularization tends to be stronger (higher resulting roughness).\n\t\t//\n\t\t// We're biasing (75%) towards refraction to bias towards higher regularization to 
conservatively\n\t\t// reduce variance\n\t\treturn hippt::max(regularization_settings.min_roughness, hippt::max(initial_roughness, regularized_roughness_refraction * 0.75f + regularized_roughness_reflection * 0.25f));\n\t}\n\n\t/**\n\t * 'sample_number' should be >= 1\n\t */\n\tHIPRT_HOST_DEVICE static float consistent_tau(float tau_0, int sample_number)\n\t{\n#if PrincipledBSDFDoMicrofacetRegularizationConsistentParameterization == KERNEL_OPTION_FALSE\n\t\treturn tau_0;\n#endif\n\n\t\t// Eq. 16 of the paper\n\t\treturn 1.0f / (2.0f * M_PI * (1.0f - cosf(atanf(powf(sample_number + 1, -1.0f / 6.0f) * sqrt(M_FOUR_PI * tau_0 - 1.0f) / (M_TWO_PI * tau_0 - 1.0f)))));\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/OrenNayar.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_OREN_NAYAR_H\n#define DEVICE_OREN_NAYAR_H\n\n#include \"Device/includes/Sampling.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\n/* References:\n * [1] [Physically Based Rendering 3rd Edition] https://www.pbr-book.org/3ed-2018/Reflection_Models/Microfacet_Models\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F oren_nayar_brdf_eval(const DeviceUnpackedEffectiveMaterial& material, const float3& local_view_direction, const float3& local_to_light_direction, float& pdf)\n{\n    // sin(theta)^2 = 1.0 - cos(theta)^2\n    float sin_theta_i = sqrt(1.0f - local_to_light_direction.z * local_to_light_direction.z);\n    float sin_theta_o = sqrt(1.0f - local_view_direction.z * local_view_direction.z);\n\n    // max_cos here is going to be cos(phi_to_light - phi_view_direction)\n    // but computed as cos(phi_light) * cos(phi_view) + sin(phi_light) * sin(phi_view)\n    // according to cos(a - b) = cos(a) * cos(b) + sin(a) * sin(b)\n    float max_cos = 0;\n    if (sin_theta_i > 1.0e-4f && sin_theta_o > 1.0e-4f)\n    {\n        float sin_phi_i = local_to_light_direction.y / sin_theta_i;\n        float cos_phi_i = local_to_light_direction.x / sin_theta_i;\n\n        float sin_phi_o = local_view_direction.y / sin_theta_o;\n        float cos_phi_o = local_view_direction.x / sin_theta_o;\n\n        float d_cos = cos_phi_i * cos_phi_o + sin_phi_i * sin_phi_o;\n\n        max_cos = hippt::max(0.0f, d_cos);\n    }\n\n    float sin_alpha, tan_beta;\n    if (hippt::abs(local_to_light_direction.z) > hippt::abs(local_view_direction.z))\n    {\n        sin_alpha = sin_theta_o;\n        tan_beta = sin_theta_i / hippt::abs(local_to_light_direction.z);\n    }\n    else\n    {\n        sin_alpha = sin_theta_i;\n        tan_beta = sin_theta_o / 
hippt::abs(local_view_direction.z);\n    }\n\n    float oren_nayar_A;\n    float oren_nayar_B;\n    MaterialUtils::get_oren_nayar_AB(material.oren_nayar_sigma, oren_nayar_A, oren_nayar_B);\n\n    pdf = local_to_light_direction.z * M_INV_PI;\n    return material.base_color * M_INV_PI * (oren_nayar_A + oren_nayar_B * max_cos * sin_alpha * tan_beta);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float oren_nayar_brdf_pdf(const DeviceUnpackedEffectiveMaterial& material, const float3& local_view_direction, const float3& local_to_light_direction)\n{\n    if (local_to_light_direction.z <= 0.0f)\n        return 0.0f;\n\n    return local_to_light_direction.z * M_INV_PI;\n}\n\n/**\n * Override of the eval function for world space directions\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F oren_nayar_brdf_eval(const DeviceUnpackedEffectiveMaterial& material, const float3& world_space_view_direction, const float3& surface_normal, const float3& world_space_to_light_direction, float& pdf)\n{\n    float3 T, B;\n    build_ONB(surface_normal, T, B);\n\n    // Using local view and light directions to simplify the following computations\n    float3 local_view_direction = world_to_local_frame(T, B, surface_normal, world_space_view_direction);\n    float3 local_to_light_direction = world_to_local_frame(T, B, surface_normal, world_space_to_light_direction);\n\n    return oren_nayar_brdf_eval(material, local_view_direction, local_to_light_direction, pdf);\n}\n\n/**\n * If sampleDirectionOnly is 'true', this function samples only the BSDF without\n * evaluating the contribution or the PDF of the BSDF. 
This function will then always return\n * ColorRGB32F(0.0f) and the 'pdf' out parameter will always be set to 0.0f\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F oren_nayar_brdf_sample(const DeviceUnpackedEffectiveMaterial& material, \n    const float3& world_space_view_direction, const float3& shading_normal, float3& out_sampled_direction, \n    float& pdf, Xorshift32Generator& random_number_generator, BSDFIncidentLightInfo* out_sampled_light_info = nullptr)\n{\n    out_sampled_direction = cosine_weighted_sample_around_normal_world_space(shading_normal, random_number_generator);\n    if (out_sampled_light_info != nullptr)\n        *out_sampled_light_info = BSDFIncidentLightInfo::NO_INFO;\n\n    if constexpr (sampleDirectionOnly)\n    {\n        pdf = 0.0f;\n\n        return ColorRGB32F(0.0f);\n    }\n    else\n        return oren_nayar_brdf_eval(material, world_space_view_direction, shading_normal, out_sampled_direction, pdf);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/Principled.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_PRINCIPLED_H\n#define DEVICE_PRINCIPLED_H\n\n#include \"Device/includes/Dispersion.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/ONB.h\"\n#include \"Device/includes/BSDFs/Lambertian.h\"\n#include \"Device/includes/BSDFs/Microfacet.h\"\n#include \"Device/includes/BSDFs/MicrofacetRegularization.h\"\n#include \"Device/includes/BSDFs/OrenNayar.h\"\n#include \"Device/includes/BSDFs/PrincipledEnergyCompensation.h\"\n#include \"Device/includes/BSDFs/ThinFilm.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/BSDFs/SheenLTC.h\"\n\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n /** References:\n  *\n  * [1] [CSE 272 University of California San Diego - Disney BSDF Homework] https://cseweb.ucsd.edu/~tzli/cse272/wi2024/homework1.pdf\n  * [2] [GLSL Path Tracer implementation by knightcrawler25] https://github.com/knightcrawler25/GLSL-PathTracer\n  * [3] [SIGGRAPH 2012 Course] https://blog.selfshadow.com/publications/s2012-shading-course/#course_content\n  * [4] [SIGGRAPH 2015 Course] https://blog.selfshadow.com/publications/s2015-shading-course/#course_content\n  * [5] [Burley 2015 Course Notes - Extending the Disney BRDF to a BSDF with Integrated Subsurface Scattering] https://blog.selfshadow.com/publications/s2015-shading-course/burley/s2015_pbs_disney_bsdf_notes.pdf\n  * [6] [PBRT v3 Source Code] https://github.com/mmp/pbrt-v3\n  * [7] [PBRT v4 Source Code] https://github.com/mmp/pbrt-v4\n  * [8] [Blender's Cycles Source Code] https://github.com/blender/cycles\n  * [9] [Autodesk Standard Surface] https://autodesk.github.io/standard-surface/\n  * [10] [Blender Principled BSDF] https://docs.blender.org/manual/fr/dev/render/shader_nodes/shader/principled.html\n  * 
[11] [Open PBR Specification] https://academysoftwarefoundation.github.io/OpenPBR/\n  * [12] [Enterprise PBR Specification] https://dassaultsystemes-technology.github.io/EnterprisePBRShadingModel/spec-2025x.md.html\n  * [13] [Arbitrarily Layered Micro-Facet Surfaces, Weidlich, Wilkie] https://www.cg.tuwien.ac.at/research/publications/2007/weidlich_2007_almfs/weidlich_2007_almfs-paper.pdf\n  * [14] [A Practical Extension to Microfacet Theory for the Modeling of Varying Iridescence, Belcour, Barla, 2017] https://belcour.github.io/blog/research/publication/2017/05/01/brdf-thin-film.html\n  * [15] [MaterialX Implementation Code] https://github.com/AcademySoftwareFoundation/MaterialX\n  * [16] [Khronos GLTF 2.0 KHR_materials_iridescence Implementation Notes] https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_iridescence/README.md\n  * [17] [Khronos GLTF 2.0 KHR_materials_diffuse_transmission Implementation Notes] https://github.com/KhronosGroup/glTF/blob/main/extensions/2.0/Khronos/KHR_materials_diffuse_transmission/README.md\n  * \n  * Important note: none of the lobes of this implementation includes the cosine term.\n  * The cosine term NoL needs to be taken into account outside of the BSDF\n  */\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_coat_eval(const HIPRTRenderData& render_data, const BSDFContext& bsdf_context,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector,\n    float incident_medium_ior, float& out_pdf)\n{\n    // The coat lobe is just a microfacet lobe\n    float HoL = hippt::clamp(1.0e-8f, 1.0f, hippt::dot(local_halfway_vector, local_to_light_direction));\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.coat_roughness, bsdf_context.accumulated_path_roughness, 
render_data.render_settings.sample_number);\n\n    // We're only evaluating the coat lobe if, either:\n    // - The incident light direction was sampled from the clearcoat lobe\n    // - The coat is not a delta distribution (not perfectly smooth)\n    // - The incident light direction was sampled from another perfectly specular lobe\n    //\n    // Because if none of these two conditions are true, the evaluation of the coat will\n    // yield 0.0f anyways\n    //\n    // All the conditions are handled in 'is_specular_delta_reflection_sampled'\n    MaterialUtils::SpecularDeltaReflectionSampled coat_delta_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, bsdf_context.material.coat_anisotropy, bsdf_context.incident_light_info);\n\n    ColorRGB32F F = ColorRGB32F(full_fresnel_dielectric(HoL, incident_medium_ior, bsdf_context.material.coat_ior));\n\n    return torrance_sparrow_GGX_eval_reflect<0>(render_data, regularized_roughness, bsdf_context.material.coat_anisotropy, false, F,\n        local_view_direction, local_to_light_direction, local_halfway_vector,\n        out_pdf, coat_delta_direction_sampled, bsdf_context.current_bounce);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_coat_pdf(const HIPRTRenderData& render_data, const BSDFContext& bsdf_context,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_halfway_vector,\n    float incident_medium_ior)\n{\n    // The coat lobe is just a microfacet lobe\n    float HoL = hippt::clamp(1.0e-8f, 1.0f, hippt::dot(local_halfway_vector, local_to_light_direction));\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.coat_roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n    // We're only evaluating the coat lobe if, 
either:\n    // - The incident light direction was sampled from the clearcoat lobe\n    // - The coat is not a delta distribution (not perfectly smooth)\n    // - The incident light direction was sampled from another perfectly specular lobe\n    //\n    // Because if none of these two conditions are true, the evaluation of the coat will\n    // yield 0.0f anyways\n    //\n    // All the conditions are handled in 'is_specular_delta_reflection_sampled'\n    MaterialUtils::SpecularDeltaReflectionSampled coat_delta_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, bsdf_context.material.coat_anisotropy, bsdf_context.incident_light_info);\n\n    return torrance_sparrow_GGX_pdf_reflect(render_data, regularized_roughness, bsdf_context.material.coat_anisotropy,\n        local_view_direction, local_to_light_direction, local_halfway_vector, coat_delta_direction_sampled);\n}\n\n/**\n * The sampled direction is returned in the local shading frame of the basis used for 'local_view_direction'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_coat_sample(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.coat_roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n    return microfacet_GGX_sample_reflection(regularized_roughness, bsdf_context.material.coat_anisotropy, local_view_direction, random_number_generator);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_sheen_eval(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,\n    const float3& local_view_direction, const float3& local_to_light_direction, float& pdf, float& 
out_sheen_reflectance)\n{\n    return sheen_ltc_eval(render_data, material, local_to_light_direction, local_view_direction, pdf, out_sheen_reflectance);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_sheen_pdf(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,\n    const float3& local_view_direction, const float3& local_to_light_direction)\n{\n    return sheen_ltc_pdf(render_data, material, local_to_light_direction, local_view_direction);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_sheen_sample(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, \n    const float3& local_view_direction, const float3& shading_normal, Xorshift32Generator& random_number_generator)\n{\n    return sheen_ltc_sample(render_data, material, local_view_direction, shading_normal, random_number_generator);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_metallic_eval(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,\n    float roughness, float anisotropy, float incident_ior,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_half_vector,\n    float& pdf)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n    MaterialUtils::SpecularDeltaReflectionSampled metal_delta_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, anisotropy, bsdf_context.incident_light_info);\n    if (metal_delta_direction_sampled == MaterialUtils::SpecularDeltaReflectionSampled::SPECULAR_PEAK_NOT_SAMPLED)\n    {\n        // The distribution isn't worth evaluating because it's specular but we the incident\n        // light direction wasn't sampled from a specular distribution\n\n     
   pdf = 0.0f;\n        return ColorRGB32F(0.0f);\n    }\n\n    float HoL = hippt::clamp(1.0e-8f, 1.0f, hippt::dot(local_half_vector, local_to_light_direction));\n\n    ColorRGB32F F_metal = adobe_f82_tint_fresnel(bsdf_context.material.base_color, bsdf_context.material.metallic_F82, bsdf_context.material.metallic_F90, bsdf_context.material.metallic_F90_falloff_exponent, HoL);\n    ColorRGB32F F_thin_film = thin_film_fresnel(bsdf_context.material, incident_ior, HoL);\n    ColorRGB32F F = hippt::lerp(F_metal, F_thin_film, bsdf_context.material.thin_film);\n\n    return torrance_sparrow_GGX_eval_reflect<PrincipledBSDFDoEnergyCompensation && PrincipledBSDFDoMetallicEnergyCompensation>(render_data,\n        regularized_roughness, anisotropy, bsdf_context.material.do_metallic_energy_compensation, F,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        pdf, metal_delta_direction_sampled,\n        bsdf_context.current_bounce);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_metallic_pdf(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,\n    float roughness, float anisotropy,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_half_vector)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n    MaterialUtils::SpecularDeltaReflectionSampled metal_delta_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, anisotropy, bsdf_context.incident_light_info);\n    if (metal_delta_direction_sampled == MaterialUtils::SpecularDeltaReflectionSampled::SPECULAR_PEAK_NOT_SAMPLED)\n        // The distribution isn't worth evaluating because it's specular but we the incident\n        // light direction wasn't 
sampled from a specular distribution\n        return 0.0f;\n\n    return torrance_sparrow_GGX_pdf_reflect(render_data,\n        regularized_roughness, anisotropy,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        metal_delta_direction_sampled);\n}\n\n/**\n * The sampled direction is returned in the local shading frame of the basis used for 'local_view_direction'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_metallic_sample(const HIPRTRenderData& render_data, const BSDFContext& bsdf_context, float roughness, float anisotropy,\n    const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n    return microfacet_GGX_sample_reflection(regularized_roughness, anisotropy, local_view_direction, random_number_generator);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_diffuse_eval(const DeviceUnpackedEffectiveMaterial& material,\n    const float3& local_view_direction, const float3& local_to_light_direction, float& pdf)\n{\n    // The diffuse lobe is a simple Oren Nayar lobe\n#if PrincipledBSDFDiffuseLobe == PRINCIPLED_DIFFUSE_LOBE_LAMBERTIAN\n    return lambertian_brdf_eval(material, local_to_light_direction.z, pdf);\n#elif PrincipledBSDFDiffuseLobe == PRINCIPLED_DIFFUSE_LOBE_OREN_NAYAR\n    return oren_nayar_brdf_eval(material, local_view_direction, local_to_light_direction, pdf);\n#endif\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_diffuse_pdf(const DeviceUnpackedEffectiveMaterial& material,\n    const float3& local_view_direction, const float3& local_to_light_direction)\n{\n    // The diffuse lobe is a simple Oren Nayar lobe\n#if PrincipledBSDFDiffuseLobe == PRINCIPLED_DIFFUSE_LOBE_LAMBERTIAN\n    return 
lambertian_brdf_pdf(material, local_to_light_direction.z);\n#elif PrincipledBSDFDiffuseLobe == PRINCIPLED_DIFFUSE_LOBE_OREN_NAYAR\n    return oren_nayar_brdf_pdf(material, local_view_direction, local_to_light_direction);\n#endif\n}\n\n/**\n * The sampled direction is returned in world space\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_diffuse_sample(const float3& surface_normal, Xorshift32Generator& random_number_generator)\n{\n    // Our Oren-Nayar diffuse lobe is sampled by a cosine weighted distribution\n    return cosine_weighted_sample_around_normal_world_space(surface_normal, random_number_generator);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_specular_fresnel(const DeviceUnpackedEffectiveMaterial& material, float relative_specular_ior, float cos_theta_i)\n{\n    // We want the IOR of the layer we're coming from for the thin-film fresnel\n    // \n    // 'relative_specular_IOR' is \"A / B\"\n    // with A the IOR of the specular layer\n    // and B the IOR of the layer (or medium) above the specular layer\n    //\n    // so the IOR of the layer above is 1.0f / (relative_IOR / specular_ior) = specular_IOR / relative_IOR\n    float layer_above_IOR = material.ior / relative_specular_ior;\n\n    // Computing the fresnel term\n    // It's either the thin film fresnel for thin film interference or the usual\n    // non colored dielectric/dielectric fresnel.\n    //\n    // We're lerping between the two based on material.thin_film\n    float material_thin_film = material.thin_film;\n    ColorRGB32F F_specular;\n    if (material_thin_film < 1.0f)\n        F_specular = ColorRGB32F(full_fresnel_dielectric(cos_theta_i, relative_specular_ior));\n\n    ColorRGB32F F_thin_film = thin_film_fresnel(material, layer_above_IOR, cos_theta_i);\n    ColorRGB32F F = hippt::lerp(F_specular, F_thin_film, material_thin_film);\n\n    return F;\n}\n\n/**\n * Returns the relative IOR as \"A /B\"\n * with A the IOR of the specular layer\n * and B the IOR of the 
layer (or medium) above the specular layer\n * \n * 'incident_medium_ior' should be the IOR of the medium in which the object is (i.e. the air most likely)\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_specular_relative_ior(const DeviceUnpackedEffectiveMaterial& material, float incident_medium_ior)\n{\n    if (material.coat == 0.0f)\n        return material.ior;\n\n    // When computing the specular layer, the incident IOR actually isn't always\n    // that of the incident medium because we may have the coat layer above us instead of the medium\n    // so the \"proper\" IOR to use here is actually the lerp between the medium and the coat\n    // IOR depending on the coat factor\n    float incident_layer_ior = hippt::lerp(incident_medium_ior, material.coat_ior, material.coat);\n    float relative_ior = material.ior / incident_layer_ior;\n    if (relative_ior < 1.0f)\n        // If the coat IOR (which we're coming from) is greater than the IOR\n        // of the base layer (which is the specular layer with IOR material.ior)\n        // then we may hit total internal reflection when entering the specular layer from\n        // the coat layer above. This manifests as a weird ring near grazing angles.\n        //\n        // This weird ring should not happen in reality. It only happens because we're\n        // not bending the rays when refracting into the coat layer: we compute the\n        // fresnel at the specular/coat interface as if the light direction just went\n        // straight through the coat layer without refraction. 
There will always be\n        // some refraction at the air/coat interface if the coat layer IOR is > 1.0f.\n        //\n        // The proper solution would be to actually bend the ray after it hits the coat layer.\n        // We would then be evaluating the fresnel at the coat/specular interface with a\n        // incident light cosine angle that is different and we wouldn't get total internal reflection.\n        //\n        // This is explained in the [OpenPBR Spec 2024]\n        // https://academysoftwarefoundation.github.io/OpenPBR/#model/coat/totalinternalreflection\n        // \n        // A more computationally efficient solution is to simply invert the IOR as done here.\n        // This is also explained in the OpenPBR spec as well as in \n        // [Novel aspects of the Adobe Standard Material, Kutz, Hasan, Edmondson, 2023]\n        // https://helpx.adobe.com/content/dam/substance-3d/general-knowledge/asm/Adobe%20Standard%20Material%20-%20Technical%20Documentation%20-%20May2023.pdf\n        relative_ior = 1.0f / relative_ior;\n\n    return relative_ior;\n}\n\n/**\n * 'relative_ior' is eta_t / eta_i with 'eta_t' the IOR of the glossy layer and\n * 'eta_i' the IOR of \n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_specular_eval(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float relative_ior,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_half_vector, float& pdf)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n    MaterialUtils::SpecularDeltaReflectionSampled is_specular_delta_reflection_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, bsdf_context.material.anisotropy, 
bsdf_context.incident_light_info);\n\n    // The specular lobe is just another GGX lobe\n    // \n    // We actually don't want energy compensation here for the specular layer\n    // (hence the torrance_sparrow_GGX_eval_reflect<0>) because energy compensation\n    // for the specular layer is handled for the glossy based (specular + diffuse lobe)\n    // as a whole, not just in the specular layer \n    ColorRGB32F F = principled_specular_fresnel(bsdf_context.material, relative_ior, hippt::dot(local_to_light_direction, local_half_vector));\n    // No energy compensation on the specular layer because energy compensation is done on the whole diffuse + specular\n    // not just specular.\n    ColorRGB32F specular = torrance_sparrow_GGX_eval_reflect<0>(render_data, regularized_roughness, bsdf_context.material.anisotropy, /* do_energy_compensation */ false, F,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        pdf, is_specular_delta_reflection_sampled,\n        bsdf_context.current_bounce);\n\n    return specular;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_specular_pdf(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float relative_ior,\n    const float3& local_view_direction, const float3& local_to_light_direction, const float3& local_half_vector)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n    MaterialUtils::SpecularDeltaReflectionSampled is_specular_delta_reflection_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, regularized_roughness, bsdf_context.material.anisotropy, bsdf_context.incident_light_info);\n\n    return torrance_sparrow_GGX_pdf_reflect(render_data, regularized_roughness, bsdf_context.material.anisotropy,\n        
local_view_direction, local_to_light_direction, local_half_vector,\n         is_specular_delta_reflection_sampled);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_specular_sample(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float roughness, float anisotropy, const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    float regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, bsdf_context.material.roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n    return microfacet_GGX_sample_reflection(regularized_roughness, anisotropy, local_view_direction, random_number_generator);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_beer_absorption(const HIPRTRenderData& render_data, RayVolumeState& ray_volume_state)\n{\n    // Note that we want to use the absorption of the material we finished traveling in.\n    // The BSDF we're evaluating right now is using the new material we're refracting in, this is not\n    // by this material that the ray has been absorbed. 
The ray has been absorded by the volume\n    // it was in before refracting here, so it's the incident mat index\n    ColorRGB32F absorption_color;\n    if (render_data.bsdfs_data.white_furnace_mode)\n        absorption_color = ColorRGB32F(1.0f);\n    else\n        absorption_color = render_data.buffers.materials_buffer.get_absorption_color(ray_volume_state.incident_mat_index);\n    if (!absorption_color.is_white())\n    {\n        // Capping the distance to avoid numerical issues at 0 distance\n        // (can happen depending on the geometry of the scene if a ray exits a volume very quickly after entering it)\n        ray_volume_state.distance_in_volume = hippt::max(ray_volume_state.distance_in_volume, 1.0e-6f);\n\n        // Remapping the absorption coefficient so that it is more intuitive to manipulate\n        // according to Burley, 2015 [5].\n        // This effectively gives us a \"at distance\" absorption coefficient.\n        ColorRGB32F absorption_coefficient = log(absorption_color) / render_data.buffers.materials_buffer.get_absorption_at_distance(ray_volume_state.incident_mat_index);\n        return exp(absorption_coefficient * ray_volume_state.distance_in_volume);\n    }\n\n    return ColorRGB32F(1.0f);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_glass_eval(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, const float3& local_view_direction, const float3& local_to_light_direction, float& pdf)\n{\n    pdf = 0.0f;\n\n    float NoV = local_view_direction.z;\n    float NoL = local_to_light_direction.z;\n\n    if (hippt::abs(NoL) < 1.0e-8f)\n        // Check to avoid dividing by 0 later on\n        return ColorRGB32F(0.0f);\n\n    // We're in the case of reflection if the view direction and the bounced ray (light direction) are in the same hemisphere\n    bool reflecting = NoL * NoV > 0;\n\n    // Relative eta = eta_t / eta_i\n    float eta_i = bsdf_context.volume_state.incident_mat_index == 
NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n    float eta_t = bsdf_context.volume_state.outgoing_mat_index == NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.outgoing_mat_index);\n\n    float dispersion_abbe_number = bsdf_context.material.dispersion_abbe_number;\n    float dispersion_scale = bsdf_context.material.dispersion_scale;\n    eta_i = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_i, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n    eta_t = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_t, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n\n    float relative_eta = eta_t / eta_i;\n\n    // relative_eta can be 1 when refracting from a volume into another volume of the same IOR.\n    // This in conjunction with the view direction and the light direction being the negative of\n    // one another will lead the microfacet normal to be the null vector which then causes\n    // NaNs.\n    // \n    // Example:\n    // The view and light direction can be the negative of one another when looking straight at a\n    // flat window for example. 
The view direction is aligned with the normal of the window\n    // in this configuration whereas the refracting light direction (and it is very likely to refract\n    // in this configuration) is going to point exactly away from the view direction and the normal.\n    // \n    // We then have\n    // \n    // half_vector  = light_dir + relative_eta * view_dir\n    //              = light_dir + 1.0f * view_dir\n    //              = light_dir + view_dir = (0, 0, 0)\n    //\n    // Normalizing this null vector then leads to a NaNs because of the zero-length.\n    //\n    // We're settings relative_eta to 1.00001f to avoid this issue\n    if (hippt::abs(relative_eta - 1.0f) < 1.0e-5f)\n        relative_eta = 1.0f + 1.0e-5f;\n\n    bool thin_walled = bsdf_context.material.thin_walled;\n    float scaled_roughness = MaterialUtils::get_thin_walled_roughness(thin_walled, bsdf_context.material.roughness, relative_eta);\n\n    float3 local_half_vector;\n    if (scaled_roughness <= MaterialConstants::ROUGHNESS_CLAMP && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_FALSE)\n    {\n        // Fast path for specular glass\n        //\n        // Note that we check for 'PrincipledBSDFDeltaDistributionEvaluationOptimization' because if we're not using the delta distribution\n        // optimizations, any incident light direction given to the BSDF is going to be evaluated\n        // and so the half vector won't necessarily be (0, 0, 1).\n        //\n        // Any incident light direction may also be given with the optimizations ON but\n        // with the optimizations ON, any direction that doesn't align with the perfect\n        // reflection direction will be rejected (and contribute 0) so this is not an issue\n        //\n        // If microfacet regularization is enabled, the smooth glass is going to be roughened so we cannot\n        // assume roughness 0.0f and we fall back to the 
classical half-vector computation below\n        local_half_vector = make_float3(0.0f, 0.0f, 1.0f);\n    }\n    else\n    {\n        // Computing the generalized (that takes refraction into account) half vector\n        if (reflecting)\n            local_half_vector = local_to_light_direction + local_view_direction;\n        else\n        {\n            if (thin_walled)\n                // Thin walled material refract without light bending (because both refractions interfaces are simulated in one layer of material)\n                // just refract straight through i.e. light_direction = -view_direction\n                // It can be as si\n                local_half_vector = local_to_light_direction * make_float3(1.0f, 1.0f, -1.0f) + local_view_direction;\n            else\n                // We need to take the relative_eta into account when refracting to compute\n                // the half vector (this is the \"generalized\" half vector)\n                local_half_vector = local_to_light_direction * relative_eta + local_view_direction;\n        }\n\n        local_half_vector = hippt::normalize(local_half_vector);\n    }\n\n    if (local_half_vector.z < 0.0f)\n        // Because the rest of the function we're going to compute here assume\n        // that the microfacet normal is in the same hemisphere as the surface\n        // normal, we're going to flip it if needed\n        local_half_vector = -local_half_vector;\n\n    float HoL = hippt::dot(local_to_light_direction, local_half_vector);\n    float HoV = hippt::dot(local_view_direction, local_half_vector);\n\n    if (HoL * NoL < 0.0f || HoV * NoV < 0.0f)\n        // Backfacing microfacets when the microfacet normal isn't in the same\n        // hemisphere as the view dir or light dir\n        return ColorRGB32F(0.0f);\n\n    float thin_film = bsdf_context.material.thin_film;\n    ColorRGB32F F_thin_film = thin_film_fresnel(bsdf_context.material, eta_i, HoV);\n    ColorRGB32F F_no_thin_film;\n    if (thin_film 
< 1.0f)\n        F_no_thin_film = ColorRGB32F(full_fresnel_dielectric(HoV, relative_eta));\n    ColorRGB32F F = hippt::lerp(F_no_thin_film, F_thin_film, thin_film);\n\n    if (thin_walled && F.r < 1.0f && thin_film == 0.0f && scaled_roughness < 0.1f)\n        // If this is not total reflection, adjusting the fresnel term to account for inter-reflections within the thin interface\n        // Not doing this if thin-film is present because that would not be accurate at all. Thin-film\n        // effect require phase shift computations and that's expensive so we're just not doing it here\n        // instead\n        //\n        // Reference: Dielectric BSDF, PBR Book 4ed: https://pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF\n        //\n        // If there is no thin-film, the fresnel reflectance is non-colored and is the same\n        // value for all RGB wavelengths. This means that f_reflect_proba is actually just the fresnel reflection factor\n        //\n        // This fresnel scaling only works at roughness 0 but still using below 0.1f for a close enough approximation\n        F += ColorRGB32F(hippt::square(1.0f - F.r) * F.r / (1.0f - hippt::square(F.r)));\n\n    float f_reflect_proba = F.luminance();\n\n    ColorRGB32F color;\n    if (reflecting)\n    {\n        float regularized_roughness = scaled_roughness;\n        if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            // If this if for MIS, we want to use the same roughness as for the BSDF sampling so that the MIS weights are correct\n            regularized_roughness = MicrofacetRegularization::regularize_mix_reflection_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n        else if 
(bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n        MaterialUtils::SpecularDeltaReflectionSampled delta_glass_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, scaled_roughness, bsdf_context.material.anisotropy, bsdf_context.incident_light_info);\n\n        color = torrance_sparrow_GGX_eval_reflect<0>(render_data, regularized_roughness, bsdf_context.material.anisotropy, false, F,\n            local_view_direction, local_to_light_direction, local_half_vector,\n            pdf, delta_glass_direction_sampled, bsdf_context.current_bounce);\n\n        // Note: for specular glass, the compensation term will never be evaluated as there is no energy loss.\n        // The function will return very quickly and will return 1.0f\n        float compensation_term = get_GGX_energy_compensation_dielectrics(render_data, bsdf_context.material, bsdf_context.volume_state.inside_material, eta_t, eta_i, relative_eta, local_view_direction.z, bsdf_context.current_bounce);\n        // [Turquin, 2019] Eq. 
18 for dielectric microfacet energy compensation\n        color /= compensation_term;\n\n        // Scaling the PDF by the probability of being here (reflection of the ray and not transmission)\n        pdf *= f_reflect_proba;\n    }\n    else\n    {\n        float regularized_roughness = scaled_roughness;\n        if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            // If this if for MIS, we want to use the same roughness as for the BSDF sampling so that the MIS weights are correct\n            regularized_roughness = MicrofacetRegularization::regularize_mix_reflection_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n        else if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            regularized_roughness = MicrofacetRegularization::regularize_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n\n        color = torrance_sparrow_GGX_eval_refract(bsdf_context.material, regularized_roughness, relative_eta, F,\n            local_view_direction, local_to_light_direction, local_half_vector,\n            pdf, bsdf_context.incident_light_info);\n        // Taking refraction russian roulette probability into account\n        pdf *= 1.0f - f_reflect_proba;\n\n        // Note: for specular glass, the compensation term will never be evaluated as there is no energy loss.\n        // The function will return very quickly and will return 1.0f\n        float compensation_term = 
get_GGX_energy_compensation_dielectrics(render_data, bsdf_context.material, regularized_roughness, bsdf_context.volume_state.inside_material, eta_t, eta_i, relative_eta, local_view_direction.z, bsdf_context.current_bounce);\n        // [Turquin, 2019] Eq. 18 for dielectric microfacet energy compensation\n        color /= compensation_term;\n\n        if (thin_walled)\n            // Thin materials use the base color squared to represent both the entry and the exit\n            // simultaneously\n            color *= bsdf_context.material.base_color;\n\n        if (thin_walled && bsdf_context.update_ray_volume_state)\n            // For thin materials, refracting in equals refracting out so we're poping the stack\n            bsdf_context.volume_state.interior_stack.pop(bsdf_context.volume_state.inside_material);\n        else if (bsdf_context.volume_state.incident_mat_index != NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX)\n        {\n            // If we're not coming from the air, this means that we were in a volume and we're currently\n            // refracting out of the volume or into another volume.\n            // This is where we take the absorption of our travel into account using Beer-Lambert's law.\n            color *= principled_beer_absorption(render_data, bsdf_context.volume_state);\n\n            if (bsdf_context.update_ray_volume_state)\n            {\n                // We changed volume so we're resetting the distance\n                bsdf_context.volume_state.distance_in_volume = 0.0f;\n                if (bsdf_context.volume_state.inside_material)\n                    // We refracting out of a volume so we're poping the stack\n                    bsdf_context.volume_state.interior_stack.pop(bsdf_context.volume_state.inside_material);\n            }\n        }\n    }\n\n    return color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_glass_pdf(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, const float3& 
local_view_direction, const float3& local_to_light_direction)\n{\n    float pdf = 0.0f;\n\n    float NoV = local_view_direction.z;\n    float NoL = local_to_light_direction.z;\n\n    if (hippt::abs(NoL) < 1.0e-8f)\n        // Check to avoid dividing by 0 later on\n        return 0.0f;\n\n    // We're in the case of reflection if the view direction and the bounced ray (light direction) are in the same hemisphere\n    bool reflecting = NoL * NoV > 0;\n\n    // Relative eta = eta_t / eta_i\n    float eta_i = bsdf_context.volume_state.incident_mat_index == NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n    float eta_t = bsdf_context.volume_state.outgoing_mat_index == NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.outgoing_mat_index);\n\n    float dispersion_abbe_number = bsdf_context.material.dispersion_abbe_number;\n    float dispersion_scale = bsdf_context.material.dispersion_scale;\n    eta_i = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_i, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n    eta_t = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_t, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n\n    float relative_eta = eta_t / eta_i;\n\n    // relative_eta can be 1 when refracting from a volume into another volume of the same IOR.\n    // This in conjunction with the view direction and the light direction being the negative of\n    // one another will lead the microfacet normal to be the null vector which then causes\n    // NaNs.\n    // \n    // Example:\n    // The view and light direction can be the negative of one another when looking straight at a\n    // flat window for example. 
The view direction is aligned with the normal of the window\n    // in this configuration whereas the refracting light direction (and it is very likely to refract\n    // in this configuration) is going to point exactly away from the view direction and the normal.\n    // \n    // We then have\n    // \n    // half_vector  = light_dir + relative_eta * view_dir\n    //              = light_dir + 1.0f * view_dir\n    //              = light_dir + view_dir = (0, 0, 0)\n    //\n    // Normalizing this null vector then leads to a NaNs because of the zero-length.\n    //\n    // We're settings relative_eta to 1.00001f to avoid this issue\n    if (hippt::abs(relative_eta - 1.0f) < 1.0e-5f)\n        relative_eta = 1.0f + 1.0e-5f;\n\n    bool thin_walled = bsdf_context.material.thin_walled;\n    float scaled_roughness = MaterialUtils::get_thin_walled_roughness(thin_walled, bsdf_context.material.roughness, relative_eta);\n\n    float3 local_half_vector;\n    if (scaled_roughness <= MaterialConstants::ROUGHNESS_CLAMP && PrincipledBSDFDeltaDistributionEvaluationOptimization == KERNEL_OPTION_TRUE && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_FALSE)\n    {\n        // Fast path for specular glass\n        //\n        // Note that we check for 'PrincipledBSDFDeltaDistributionEvaluationOptimization' because if we're not using the delta distribution\n        // optimizations, any incident light direction given to the BSDF is going to be evaluated\n        // and so the half vector won't necessarily be (0, 0, 1).\n        //\n        // Any incident light direction may also be given with the optimizations ON but\n        // with the optimizations ON, any direction that doesn't align with the perfect\n        // reflection direction will be rejected (and contribute 0) so this is not an issue\n        //\n        // If microfacet regularization is enabled, the smooth glass is going to be roughened so we cannot\n        // assume roughness 0.0f and we fall back to the 
classical half-vector computation below\n        local_half_vector = make_float3(0.0f, 0.0f, 1.0f);\n    }\n    else\n    {\n        // Computing the generalized (that takes refraction into account) half vector\n        if (reflecting)\n            local_half_vector = local_to_light_direction + local_view_direction;\n        else\n        {\n            if (thin_walled)\n                // Thin walled material refract without light bending (because both refractions interfaces are simulated in one layer of material)\n                // just refract straight through i.e. light_direction = -view_direction\n                // It can be as si\n                local_half_vector = local_to_light_direction * make_float3(1.0f, 1.0f, -1.0f) + local_view_direction;\n            else\n                // We need to take the relative_eta into account when refracting to compute\n                // the half vector (this is the \"generalized\" half vector)\n                local_half_vector = local_to_light_direction * relative_eta + local_view_direction;\n        }\n\n        local_half_vector = hippt::normalize(local_half_vector);\n    }\n\n    if (local_half_vector.z < 0.0f)\n        // Because the rest of the function we're going to compute here assume\n        // that the microfacet normal is in the same hemisphere as the surface\n        // normal, we're going to flip it if needed\n        local_half_vector = -local_half_vector;\n\n    float HoL = hippt::dot(local_to_light_direction, local_half_vector);\n    float HoV = hippt::dot(local_view_direction, local_half_vector);\n\n    if (HoL * NoL < 0.0f || HoV * NoV < 0.0f)\n        // Backfacing microfacets when the microfacet normal isn't in the same\n        // hemisphere as the view dir or light dir\n        return 0.0f;\n\n    float thin_film = bsdf_context.material.thin_film;\n    ColorRGB32F F_thin_film = thin_film_fresnel(bsdf_context.material, eta_i, HoV);\n    ColorRGB32F F_no_thin_film;\n    if (thin_film < 1.0f)\n     
   F_no_thin_film = ColorRGB32F(full_fresnel_dielectric(HoV, relative_eta));\n    ColorRGB32F F = hippt::lerp(F_no_thin_film, F_thin_film, thin_film);\n\n    if (thin_walled && F.r < 1.0f && thin_film == 0.0f && scaled_roughness < 0.1f)\n        // If this is not total reflection, adjusting the fresnel term to account for inter-reflections within the thin interface\n        // Not doing this if thin-film is present because that would not be accurate at all. Thin-film\n        // effect require phase shift computations and that's expensive so we're just not doing it here\n        // instead\n        //\n        // Reference: Dielectric BSDF, PBR Book 4ed: https://pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF\n        //\n        // If there is no thin-film, the fresnel reflectance is non-colored and is the same\n        // value for all RGB wavelengths. This means that f_reflect_proba is actually just the fresnel reflection factor\n        //\n        // This fresnel scaling only works at roughness 0 but still using below 0.1f for a close enough approximation\n        F += ColorRGB32F(hippt::square(1.0f - F.r) * F.r / (1.0f - hippt::square(F.r)));\n\n    float f_reflect_proba = F.luminance();\n\n    if (reflecting)\n    {\n        float regularized_roughness = scaled_roughness;\n        if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            // If this if for MIS, we want to use the same roughness as for the BSDF sampling so that the MIS weights are correct\n            regularized_roughness = MicrofacetRegularization::regularize_mix_reflection_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n        else if (bsdf_context.bsdf_regularization_mode == 
MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            regularized_roughness = MicrofacetRegularization::regularize_reflection(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, render_data.render_settings.sample_number);\n\n        MaterialUtils::SpecularDeltaReflectionSampled delta_glass_direction_sampled = MaterialUtils::is_specular_delta_reflection_sampled(bsdf_context.material, scaled_roughness, bsdf_context.material.anisotropy, bsdf_context.incident_light_info);\n\n        pdf = torrance_sparrow_GGX_pdf_reflect(render_data, regularized_roughness, bsdf_context.material.anisotropy,\n            local_view_direction, local_to_light_direction, local_half_vector,\n            delta_glass_direction_sampled);\n\n        // Scaling the PDF by the probability of being here (reflection of the ray and not transmission)\n        pdf *= f_reflect_proba;\n    }\n    else\n    {\n        float regularized_roughness = scaled_roughness;\n        if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            // If this if for MIS, we want to use the same roughness as for the BSDF sampling so that the MIS weights are correct\n            regularized_roughness = MicrofacetRegularization::regularize_mix_reflection_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n        else if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n            regularized_roughness = 
MicrofacetRegularization::regularize_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n\n        pdf = torrance_sparrow_GGX_pdf_refract(bsdf_context.material, regularized_roughness, relative_eta,\n            local_view_direction, local_to_light_direction, local_half_vector,\n            bsdf_context.incident_light_info);\n        // Taking refraction russian roulette probability into account\n        pdf *= 1.0f - f_reflect_proba;\n    }\n\n    return pdf;\n}\n\n/**\n * The sampled direction is returned in the local shading frame of the basis used for 'local_view_direction'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_glass_sample(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float3 local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    float eta_i = bsdf_context.volume_state.incident_mat_index == NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n    float eta_t = bsdf_context.volume_state.outgoing_mat_index == NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 
1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.outgoing_mat_index);\n\n    float dispersion_abbe_number = bsdf_context.material.dispersion_abbe_number;\n    float dispersion_scale = bsdf_context.material.dispersion_scale;\n    eta_i = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_i, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n    eta_t = compute_dispersion_ior(dispersion_abbe_number, dispersion_scale, eta_t, hippt::abs(bsdf_context.volume_state.sampled_wavelength));\n\n    float relative_eta = eta_t / eta_i;\n    // To avoid sampling directions that would lead to a null half_vector.\n    // Explained in more details in principled_glass_eval.\n    if (hippt::abs(relative_eta - 1.0f) < 1.0e-5f)\n        relative_eta = 1.0f + 1.0e-5f;\n\n    bool thin_walled = bsdf_context.material.thin_walled;\n    float thin_walled_scaled_roughness = MaterialUtils::get_thin_walled_roughness(thin_walled, bsdf_context.material.roughness, relative_eta);\n\n    if (bsdf_context.bsdf_regularization_mode == MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS && PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE)\n        // Because we do not know if advance if we're going to reflecct or refract, we do not know\n        // whether we should regularize using the microfacet reflection or refraction bound function.\n        // \n        // So we take the average. 
This is going to be over-roughened for reflection and under-roughened\n        // for refractions but this should still be effective\n        thin_walled_scaled_roughness = MicrofacetRegularization::regularize_mix_reflection_refraction(render_data.bsdfs_data.microfacet_regularization, bsdf_context.bsdf_regularization_mode, thin_walled_scaled_roughness, bsdf_context.accumulated_path_roughness, eta_i, eta_t, render_data.render_settings.sample_number);\n\n    float alpha_x, alpha_y;\n    MaterialUtils::get_alphas(thin_walled_scaled_roughness, bsdf_context.material.anisotropy, alpha_x, alpha_y);\n    float3 microfacet_normal = GGX_anisotropic_sample_microfacet(local_view_direction, alpha_x, alpha_y, random_number_generator);\n\n    float HoV = hippt::dot(local_view_direction, microfacet_normal);\n    float thin_film = bsdf_context.material.thin_film;\n\n    ColorRGB32F F_thin_film = thin_film_fresnel(bsdf_context.material, eta_i, HoV);\n    ColorRGB32F F_no_thin_film;\n    if (thin_film < 1.0f)\n        F_no_thin_film = ColorRGB32F(full_fresnel_dielectric(HoV, relative_eta));\n    ColorRGB32F F = hippt::lerp(F_no_thin_film, F_thin_film, thin_film);\n\n    if (thin_walled && F.r < 1.0f && thin_film == 0.0f && thin_walled_scaled_roughness < 0.1f)\n        // If this is not total reflection, adjusting the fresnel term to account for inter-reflections within the thin interface\n        // Not doing this if thin-film is present because that would not be accurate at all. Thin-film\n        // effect require phase shift computations and that's very expensive so we're just not doing it here\n        // instead\n        //\n        // Reference: Dielectric BSDF, PBR Book 4ed: https://pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF\n        //\n        // If there is no thin-film, the fresnel reflectance is non-colored and is the same\n        // value for all RGB wavelengths. 
This means that f_reflect_proba is actually just the fresnel reflection factor\n        //\n        // This fresnel scaling only works at roughness 0 but still using below 0.1f for a close enough approximation\n        F += ColorRGB32F(hippt::square(1.0f - F.r) * F.r / (1.0f - hippt::square(F.r)));\n\n    float f_reflect_proba = F.luminance();\n\n    float rand_1 = random_number_generator();\n\n    float3 sampled_direction;\n    if (rand_1 < f_reflect_proba)\n    {\n        // Reflection\n        sampled_direction = reflect_ray(local_view_direction, microfacet_normal);\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFLECT_LOBE;\n\n        // This is a reflection, we're poping the stack\n        if (bsdf_context.update_ray_volume_state)\n            bsdf_context.volume_state.interior_stack.pop(false);\n    }\n    else\n    {\n        // Refraction\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE;\n\n        if (hippt::dot(microfacet_normal, local_view_direction) < 0.0f)\n            // For the refraction operation that follows, we want the direction to refract (the view\n            // direction here) to be in the same hemisphere as the normal (the microfacet normal here)\n            // so we're flipping the microfacet normal in case it wasn't in the same hemisphere as\n            // the view direction\n            microfacet_normal = -microfacet_normal;\n\n        if (thin_walled)\n        {\n            // Because the interface is thin (and so we refract twice, \"cancelling\" the bending the light),\n            // the refraction direction is just the incoming (view direction) reflected\n            // and flipped about the normal plane\n\n            float3 reflected = reflect_ray(local_view_direction, microfacet_normal);\n            // Now flipping\n            reflected.z *= -1.0f;\n\n            // Refraction through the thin walled 
material. \n            // We're poping the stack because we're not inside the material even\n            // though this is a refraction. A thin material has no inside\n            if (bsdf_context.update_ray_volume_state)\n                bsdf_context.volume_state.interior_stack.pop(false);\n\n            return reflected;\n        }\n        else\n            sampled_direction = refract_ray(local_view_direction, microfacet_normal, relative_eta);\n    }\n\n    return sampled_direction;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_diffuse_transmission_eval(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,\n    RayVolumeState& ray_volume_state, bool update_ray_volume_state,\n    const float3& local_view_direction, float3 local_to_light_direction,\n    float& diffuse_transmission_pdf)\n{\n    diffuse_transmission_pdf = 0.0f;\n\n    if (local_view_direction.z * local_to_light_direction.z > 0.0f)\n        // Both are in the same hemisphere, incorrect for a transmission only lobe\n        return ColorRGB32F(0.0f);\n\n    ColorRGB32F color = material.base_color * M_INV_PI;\n    if (ray_volume_state.incident_mat_index != NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX)\n    {\n        // If we're not coming from the air, this means that we were in a volume and we're currently\n        // refracting out of the volume or into another volume.\n        // This is where we take the absorption of our travel into account using Beer-Lambert's law.\n        color *= principled_beer_absorption(render_data, ray_volume_state);\n\n        if (update_ray_volume_state)\n        {\n            // We changed volume so we're resetting the distance\n            ray_volume_state.distance_in_volume = 0.0f;\n            if (ray_volume_state.inside_material)\n                // We refracting out of a volume so we're poping the stack\n                ray_volume_state.interior_stack.pop(ray_volume_state.inside_material);\n        }\n    }\n\n   
 diffuse_transmission_pdf = hippt::abs(local_to_light_direction.z * M_INV_PI);\n\n    return color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_diffuse_transmission_pdf(const float3& local_view_direction, float3 local_to_light_direction)\n{\n    if (local_view_direction.z * local_to_light_direction.z > 0.0f)\n        // Both are in the same hemisphere, incorrect for a transmission only lobe\n        return 0.0f;\n\n    return hippt::abs(local_to_light_direction.z * M_INV_PI);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 principled_diffuse_transmission_sample(float3 surface_normal, Xorshift32Generator& random_number_generator)\n{\n    // Negating the normal here because by convention the surface normal given\n    // to this function is in the same hemisphere as the view direction but we\n    // want to sample a refraction, on the other side of the normal\n    return cosine_weighted_sample_around_normal_world_space(-surface_normal, random_number_generator);\n}\n\n/**\n * Reference:\n * \n * [1] [Open PBR Specification - Coat Darkening] https://academysoftwarefoundation.github.io/OpenPBR/#model/coat/darkening\n * \n * 'relative_eta' must be coat_ior / incident_medium_ior\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_coat_compute_darkening(const DeviceUnpackedEffectiveMaterial& material, float relative_eta, float view_dir_fresnel)\n{\n    if (material.coat_darkening == 0.0f)\n        return ColorRGB32F(1.0f);\n    \n    // Fraction of light that exhibits total internal reflection inside the clearcoat layer,\n    // assuming a perfectly diffuse base\n    float Kr = 1.0f - (1.0f - fresnel_hemispherical_albedo_fit(relative_eta)) / (relative_eta * relative_eta); // Eq. 66\n\n    // Fraction of light that exhibits total internal reflection inside the clearcoat layer,\n    // assuming a perfectly smooth base\n    float Ks = view_dir_fresnel; // Eq. 
67\n\n    // The roughness of the base layer isn't just material.roughness: \n    // \n    // What if material.roughness is 0.0f but there is no specular, or metallic or glass layer.\n    // This means that there is just the diffuse lobe below the clearcoat layer. So even if\n    // material.roughness is 0.0f, because the coat layer is directly on top of the diffuse layer,\n    // the roughness of the base layer is 1.0f\n    //\n    // Now what if we have 0 specular but 1 metallic? Then we must use the roughness of the metallic layer\n    // (which is actually just material.roughness).\n    //\n    // Same for the glass lobe (and specular lobe actually)\n    //\n    // So that's why we have these max() calls below\n    //\n    // The TL;DR is that we must use material.roughness is one of the base layer lobes (metallic/specular/glass) is 1.0f\n    // Otherwise, is all the base layer lobes are 0.0f, then the roughness is 1.0f because this is just the diffuse lobe\n    // And we lerp for the intermediate cases\n    float base_roughness = hippt::lerp(1.0f, material.roughness, hippt::max(material.specular_transmission, hippt::max(material.metallic, material.specular)));\n    // Now because our base, in the general case, isn't perfectly diffuse or perfectly smooth\n    // we're lerping between the two values based on the roughness of the based layer and this gives us a good\n    // approximation of how much total internal reflection we have inside the coat layer\n    float K = hippt::lerp(Ks, Kr, base_roughness); // Eq. 
68\n\n    // The base albedo is the albedo of the BSDF below the clearcoat.\n    // Because the BSDF below the clearcoat may be composed of many layers,\n    // we're approximating the overall as the blending of the albedos of the individual\n    // lobes.\n    //\n    // Only the base substrate of the BSDF and the sheen layer have albedos so we only\n    // have to mix those two\n    float sheen = material.sheen;\n    ColorRGB32F base_albedo = (material.base_color + material.sheen_color * sheen) / (1.0f + sheen);\n    // This approximation of the amount of total internal reflection can then be used to\n    // compute the darkening of the base caused by the clearcoating\n    ColorRGB32F darkening = (1.0f - K) / (ColorRGB32F(1.0f) - base_albedo * K);\n\n    // Disabling more or less the darkening based on:\n    //  - whether or not we have a coat layer at all\n    //  - whether or not we have coat darkening enabled at all or not\n    //  - whether or not we have a diffuse transmission lobe below the coat\n    //      layer, in which case there is no TIR between the diffuse\n    //      transmission lobe and the coat layer because the diffuse\n    //      transmission lobe is a BTDF only, it doesn't\n    //      reflect light --> no TIR --> no darkening\n    darkening = hippt::lerp(ColorRGB32F(1.0f), darkening, material.coat * material.coat_darkening * (1.0f - material.diffuse_transmission));\n\n    return darkening;\n}\n\n/**\n * 'internal' functions are just so that 'principled_bsdf_eval' looks nicer\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_coat_layer(const HIPRTRenderData& render_data, const BSDFContext& bsdf_context,\n    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,\n    float incident_ior, bool refracting, float coat_weight, float coat_proba, ColorRGB32F& layers_throughput, float& out_cumulative_pdf)\n{\n    // '|| refracting' here is needed because if we have our coat\n    // 
lobe on top of the glass lobe, we want to still compute the portion\n    // of light that is left for the glass lobe after going through the coat lobe\n    // so that's why we get into to if() block that does the computation but\n    // we're only going to compute the absorption of the coat layer\n    float coat_ior = bsdf_context.material.coat_ior;\n    if (coat_weight > 0.0f && ((local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || refracting))\n    {\n        float coat_pdf = 0.0f;\n        ColorRGB32F contribution;\n        if (!refracting)\n        {\n            // The coat layer only contribtues for light direction in the same\n            // hemisphere as the view direction (so reflections only, not refractions)\n            contribution = principled_coat_eval(render_data, bsdf_context, local_view_direction, local_to_light_direction, local_half_vector, incident_ior, coat_pdf);\n            contribution *= coat_weight;\n            contribution *= layers_throughput;\n        }\n\n        out_cumulative_pdf += coat_pdf * coat_proba;\n\n        // We're using hippt::abs() in the fresnel computation that follow because\n        // we may compute these fresnels with incident light directions that are below\n        // the hemisphere (for refractions for example) so that's where we want\n        // the cosine angle not to be negative\n\n        ColorRGB32F layer_below_attenuation = ColorRGB32F(1.0f);\n        // Only the transmitted portion of the light goes to the layer below\n        // We're using the shading normal here and not the microfacet normal because:\n        // We want the proportion of light that reaches the layer below.\n        // That's given by 1.0f - fresnelReflection.\n        // \n        // But '1.0f - fresnelReflection' needs to be computed with the shading normal, \n        // not the microfacet normal i.e. 
it needs to be 1.0f - Fresnel(dot(N, L)), \n        // not 1.0f - Fresnel(dot(H, L))\n        // \n        // By computing 1.0f - Fresnel(dot(H, L)), we're computing the light\n        // that goes through only that one microfacet with the microfacet normal. But light\n        // reaches the layer below through many other microfacets, not just the one with our current\n        // micronormal here (local_half_vector). To compute this correctly, we would actually need\n        // to integrate over the microfacet normals and compute the fresnel transmission portion\n        // (1.0f - Fresnel(dot(H, L))) for each of them and weight that contribution by the\n        // probability given by the normal distribution function for the microfacet normal.\n        // \n        // We can't do that integration online so we're instead using the shading normal to compute\n        // the transmitted portion of light. That's actually either a good approximation or the\n        // exact solution. That was shown in GDC 2017 [PBR Diffuse Lighting for GGX + Smith Microsurfaces]\n        layer_below_attenuation *= 1.0f - full_fresnel_dielectric(hippt::abs(local_to_light_direction.z), incident_ior, coat_ior);\n\n        // Also, when light reflects off of the layer below the coat layer, some of that reflected light\n        // will hit total internal reflection against the coat/air interface. This means that only\n        // the part of light that does not hit total internal reflection actually reaches the viewer.\n        // \n        // That's why we're computing another fresnel term here to account for that. And additional note:\n        // computing that fresnel with the direction reflected from the base layer or with the viewer direction\n        // is the same, Fresnel is symmetrical. 
But because we don't have the exact direction reflected from the\n        // base layer, we're using the view direction instead\n        float view_dir_fresnel = full_fresnel_dielectric(hippt::abs(local_view_direction.z), incident_ior, coat_ior);\n        layer_below_attenuation *= 1.0f - view_dir_fresnel;\n\n        if (!bsdf_context.material.coat_medium_absorption.is_white())\n        {\n            // Only computing the medium absorption if there is actually\n            // some absorption\n\n            // Taking the color of the absorbing coat medium into account when the light that got transmitted\n            // travels through it\n            //\n            // The distance traveled into the coat depends on the angle at which we're looking\n            // at it and the angle in which light goes: the grazier the angles, the more the\n            // absorption since we're traveling further in the coat before leaving\n            //\n            // Reference: [11], [13]\n            // \n            // It can happen that 'incident_refracted_angle' or 'outgoing_refracted_angle'\n            // are 0.0f \n            float incident_refracted_angle = hippt::max(1.0e-6f, sqrtf(1.0f - (1.0f - local_to_light_direction.z * local_to_light_direction.z) / (coat_ior * coat_ior)));\n            float outgoing_refracted_angle = hippt::max(1.0e-6f, sqrtf(1.0f - (1.0f - local_view_direction.z * local_view_direction.z) / (coat_ior * coat_ior)));\n\n            // Reference: [11], [13]\n            float traveled_distance_angle = 1.0f / incident_refracted_angle + 1.0f / outgoing_refracted_angle;\n            ColorRGB32F coat_absorption = exp(-(ColorRGB32F(1.0f) - pow(sqrt(bsdf_context.material.coat_medium_absorption), traveled_distance_angle)) * bsdf_context.material.coat_medium_thickness);\n            layer_below_attenuation *= coat_absorption;\n        }\n\n        layer_below_attenuation *= principled_coat_compute_darkening(bsdf_context.material, coat_ior / incident_ior, 
view_dir_fresnel);

        // If the coat layer has 0 weight, we should not get any light attenuation.
        // But if the coat layer has 1 weight, we should get the full attenuation that we
        // computed in 'layer_below_attenuation' so we're lerping between no attenuation
        // and full attenuation based on the material coat weight.
        layer_below_attenuation = hippt::lerp(ColorRGB32F(1.0f), layer_below_attenuation, bsdf_context.material.coat);

        layers_throughput *= layer_below_attenuation;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_coat_layer': returns the coat lobe's PDF
 * ('principled_coat_pdf') weighted by its sampling probability 'coat_proba', or 0.0f
 * when the coat lobe cannot have produced the given light direction.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_coat_layer(const HIPRTRenderData& render_data, const BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,
    float incident_ior, bool refracting, float coat_weight, float coat_proba)
{
    // '|| refracting' here is needed because if we have our coat
    // lobe on top of the glass lobe, we want to still compute the portion
    // of light that is left for the glass lobe after going through the coat lobe
    // so that's why we get into the if() block that does the computation but
    // we're only going to compute the absorption of the coat layer
    //
    // NOTE(review): 'coat_ior' is never read in this function ('principled_coat_pdf'
    // is called with 'incident_ior') — looks like a leftover, confirm before removing
    float coat_ior = bsdf_context.material.coat_ior;
    if (coat_weight > 0.0f && ((local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || refracting))
    {
        float coat_pdf = 0.0f;

        ColorRGB32F contribution;
        if (!refracting)
        {
            // The coat layer only contributes for light directions in the same
            // hemisphere as the view direction (so reflections only, not refractions)
            coat_pdf = principled_coat_pdf(render_data, bsdf_context, local_view_direction, local_to_light_direction, local_half_vector, incident_ior);
        }

        return coat_pdf * coat_proba;
    }

    return 0.0f;
}

/**
 * Evaluates the sheen lobe: accumulates its PDF into 'out_cumulative_pdf' and
 * attenuates 'layers_throughput' by the portion of light the sheen reflects
 * (so that only the transmitted portion reaches the layers below).
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_sheen_layer(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,
    const float3& local_view_direction, const float3& local_to_light_direction,
    bool refracting, float sheen_weight, float sheen_proba,
    ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    // NOTE(review): unlike the coat layer, 'refracting' here bypasses the
    // 'sheen_weight > 0.0f' test (different parenthesization than
    // 'internal_pdf_coat_layer') — confirm this asymmetry is intentional
    if ((sheen_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || refracting)
    {
        float sheen_reflectance;
        float sheen_pdf;
        ColorRGB32F contribution = principled_sheen_eval(render_data, material, local_view_direction, local_to_light_direction, sheen_pdf, sheen_reflectance);
        contribution *= sheen_weight;
        contribution *= layers_throughput;

        out_cumulative_pdf += sheen_pdf * sheen_proba;

        // Same as the coat layer for the sheen: only the refracted light goes into the layer below
        // 
        // The proportion of light that is reflected is given by the Ri component of AiBiRi
        // (see 'sheen_ltc_eval') which is returned by 'principled_sheen_eval' in 'sheen_reflectance'
        layers_throughput *= 1.0f - material.sheen * sheen_reflectance;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_sheen_layer'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_sheen_layer(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,
    const float3& local_view_direction, const float3& local_to_light_direction,
    bool refracting, float sheen_weight, float sheen_proba)
{
    if ((sheen_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || refracting)
    {
        float sheen_pdf = principled_sheen_pdf(render_data, material, local_view_direction, local_to_light_direction);

        return sheen_pdf * sheen_proba;
    }

    return 0.0f;
}

HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F 
internal_eval_metal_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    float roughness, float anisotropy,
    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,
    float incident_ior,
    float metal_weight, float metal_proba,
    const ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    // Metal is a BRDF: both directions must be above the surface for a non-zero contribution
    if (metal_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f)
    {
        float metal_pdf = 0.0f;
        ColorRGB32F contribution;

        contribution = principled_metallic_eval(render_data, bsdf_context,
            roughness, anisotropy, incident_ior,
            local_view_direction, local_to_light_direction, local_half_vector, metal_pdf);
        contribution *= metal_weight;
        contribution *= layers_throughput;

        out_cumulative_pdf += metal_pdf * metal_proba;

        // There is nothing below the metal layer so we don't have a
        // layer_throughput attenuation here
        // ...

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_metal_layer'.
 *
 * NOTE(review): 'incident_ior' is not read here ('principled_metallic_pdf'
 * doesn't take it) — presumably kept for signature symmetry with the eval
 * function; confirm.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_metal_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    float roughness, float anisotropy,
    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,
    float incident_ior,
    float metal_weight, float metal_proba)
{
    if (metal_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f)
    {
        float metal_pdf = principled_metallic_pdf(render_data, bsdf_context,
            roughness, anisotropy,
            local_view_direction, local_to_light_direction, local_half_vector);

        return metal_pdf * metal_proba;
    }

    return 0.0f;
}

/**
 * Evaluates the glass lobe. No hemisphere test here: glass handles both
 * reflections and refractions internally in 'principled_glass_eval'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_glass_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction,
    float glass_weight, float glass_proba,
    const ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    if (glass_weight > 0.0f)
    {
        float glass_pdf = 0.0f;
        ColorRGB32F contribution;

        contribution = principled_glass_eval(render_data, bsdf_context, local_view_direction, local_to_light_direction, glass_pdf);
        contribution *= glass_weight;
        contribution *= layers_throughput;

        // There is nothing below the glass layer so we don't have a layer_throughput absorption here
        // ...

        out_cumulative_pdf += glass_pdf * glass_proba;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_glass_layer'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_glass_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction,
    float glass_weight, float glass_proba)
{
    if (glass_weight > 0.0f)
    {
        float glass_pdf = principled_glass_pdf(render_data, bsdf_context, local_view_direction, local_to_light_direction);

        // There is nothing below the glass layer so we don't have a layer_throughput absorption here
        // ...

        return glass_pdf * glass_proba;
    }

    return 0.0f;
}

/**
 * Evaluates the diffuse transmission lobe (a BTDF: contributes only when the
 * light direction is below the surface, i.e. local_to_light_direction.z < 0.0f).
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_diffuse_transmission_layer(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material,
    RayVolumeState& ray_volume_state, bool update_ray_volume_state,
    const float3& local_view_direction, const float3 local_to_light_direction,
    float diffuse_transmission_weight, float diffuse_transmission_proba,
    const ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    if (diffuse_transmission_weight > 0.0f && local_to_light_direction.z < 0.0f)
    {
        float diffuse_transmission_pdf = 0.0f;
        
ColorRGB32F contribution = principled_diffuse_transmission_eval(render_data, material, ray_volume_state, update_ray_volume_state, local_view_direction, local_to_light_direction, diffuse_transmission_pdf);
        contribution *= diffuse_transmission_weight;
        contribution *= layers_throughput;

        // There is nothing below the diffuse transmission layer so we don't have a layer_throughput absorption here
        // ...

        out_cumulative_pdf += diffuse_transmission_pdf * diffuse_transmission_proba;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_diffuse_transmission_layer'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_diffuse_transmission_layer(const float3& local_view_direction, const float3 local_to_light_direction,
    float diffuse_transmission_weight, float diffuse_transmission_proba)
{
    if (diffuse_transmission_weight > 0.0f && local_to_light_direction.z < 0.0f)
    {
        float diffuse_transmission_pdf = principled_diffuse_transmission_pdf(local_view_direction, local_to_light_direction);

        return diffuse_transmission_pdf * diffuse_transmission_proba;
    }

    return 0.0f;
}

/**
 * Reference:
 *
 * [1] [Open PBR Specification - Coat Darkening] https://academysoftwarefoundation.github.io/OpenPBR/#model/coat/darkening
 *
 * This function computes the darkening/increase in saturation that happens
 * as light is trapped in the specular layer and bounces on the diffuse base.
 * 
 * This is essentially the same function as 'principled_coat_compute_darkening'
 * but simplified since we know that only a diffuse base can be below the specular layer
 * 
 * 'relative_eta' should be specular_ior / coat_ior (or divided by the incident
 * medium ior if there is no coating)
 * (NOTE(review): an earlier line here said "coat_ior / incident_medium_ior" —
 * that looked like a stale copy-paste from the coat version; callers pass the
 * specular relative IOR, matching the statement above)
 *
 * NOTE(review): the 'view_dir_fresnel' parameter is not read in this function's
 * body — confirm whether it is still needed by callers
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_specular_compute_darkening(const DeviceUnpackedEffectiveMaterial& material, float relative_eta, float view_dir_fresnel)
{
    // Darkening disabled entirely: no attenuation
    if (material.specular_darkening == 0.0f)
        return ColorRGB32F(1.0f);

    // Fraction of light that exhibits total internal reflection inside the clearcoat layer,
    // assuming a perfectly diffuse base
    float Kr = 1.0f - (1.0f - fresnel_hemispherical_albedo_fit(relative_eta)) / (relative_eta * relative_eta); // Eq. 66 of OpenPBR

    // For the specular layer total internal reflection, we know that the base below is diffuse
    // so K is just Kr
    float K = Kr;

    // The base albedo is the albedo of the BSDF below the specular layer.
    // That's just the diffuse lobe so the base albedo is simple here.
    ColorRGB32F base_albedo = material.base_color;
    // This approximation of the amount of total internal reflection can then be used to
    // compute the darkening of the base caused by the clearcoating
    ColorRGB32F darkening = (1.0f - K) / (ColorRGB32F(1.0f) - base_albedo * K);

    // Disabling more or less the darkening based on:
    //  - whether or not we have a specular layer at all
    //  - whether or not we have specular darkening enabled at all or not
    //  - whether or not we have a diffuse transmission lobe below the specular
    //      layer, in which case there is no TIR between the diffuse
    //      transmission lobe and the specular layer because the diffuse
    //      transmission lobe is a BTDF only, it doesn't
    //      reflect light --> no TIR --> no darkening
    darkening = hippt::lerp(ColorRGB32F(1.0f), darkening, material.specular * material.specular_darkening * (1.0f - material.diffuse_transmission));

    return darkening;
}

/**
 * Evaluates the specular (dielectric GGX) layer and updates 'layers_throughput'
 * with the fresnel-transmitted portion of light that reaches the layers below.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_specular_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction,
    const float3& local_half_vector, const float3& shading_normal,
    float incident_medium_ior, float 
specular_weight, bool refracting, float specular_proba,
    ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    // To even attempt the evaluation of the specular lobe, we need the specular weight to be non-zero
    // 
    // We also need the view and light direction to be above the normal hemisphere because the specular layer
    // is a BRDF: reflections only.
    //
    // However, we may still want to compute the layer throughput of the specular layer if we're given an
    // incident light direction that comes from the diffuse transmission lobe: such a direction has to go through
    // the specular layer first before going through the diffuse transmission lobe
    // The microfacet BRDF will actually evaluate to 0 but the layer throughput will attenuate some light
    //
    // This applies to diffuse transmission but doesn't apply to glass though (for example) because the glass layer
    // isn't "below" the specular layer, it's "adjacent" to it.
    if (specular_weight > 0.0f && ((local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || (refracting && bsdf_context.incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_TRANSMISSION_LOBE)))
    {
        float relative_ior = principled_specular_relative_ior(bsdf_context.material, incident_medium_ior);

        float specular_pdf = 0.0f;
        ColorRGB32F contribution;

        contribution = principled_specular_eval(render_data, bsdf_context,
            relative_ior, local_view_direction, local_to_light_direction, local_half_vector,
            specular_pdf);

        // Tinting the specular reflection color
        contribution *= hippt::lerp(ColorRGB32F(1.0f), bsdf_context.material.specular_tint * bsdf_context.material.specular_color, bsdf_context.material.specular);
        contribution *= specular_weight;
        contribution *= layers_throughput;

        ColorRGB32F layer_below_attenuation(1.0f);
        // Only the transmitted portion of the light goes to the layer below
        // We're using the shading normal here and not the microfacet normal because:
        // We want the proportion of light that reaches the layer below.
        // That's given by 1.0f - fresnelReflection.
        // 
        // But '1.0f - fresnelReflection' needs to be computed with the shading normal, 
        // not the microfacet normal i.e. it needs to be 1.0f - Fresnel(dot(N, L)), 
        // not 1.0f - Fresnel(dot(H, L))
        // 
        // By computing 1.0f - Fresnel(dot(H, L)), we're computing the light
        // that goes through only that one microfacet with the microfacet normal. But light
        // reaches the layer below through many other microfacets, not just the one with our current
        // micronormal here (local_half_vector). To compute this correctly, we would actually need
        // to integrate over the microfacet normals and compute the fresnel transmission portion
        // (1.0f - Fresnel(dot(H, L))) for each of them and weight that contribution by the
        // probability given by the normal distribution function for the microfacet normal.
        // 
        // We can't do that integration online so we're instead using the shading normal to compute
        // the transmitted portion of light. That's actually either a good approximation or the
        // exact solution. That was shown in GDC 2017 [PBR Diffuse Lighting for GGX + Smith Microsurfaces]
        //
        // We need the hippt::abs() here because we may be evaluating the fresnel terms with light directions/view 
        // directions that are below the surface because we're evaluating the specular lobe for a refracted direction
        ColorRGB32F light_dir_fresnel = principled_specular_fresnel(bsdf_context.material, relative_ior, hippt::abs(local_to_light_direction.z));
        // If we have a diffuse transmission lobe below the specular instead of the diffuse lobe, then we cannot
        // have TIR in between the diffuse lobe and the specular lobe (inside the specular layer) because the diffuse
        // transmission lobe is a BTDF only, it doesn't reflect any light --> no TIR
        //
        // We're cancelling the light_dir_fresnel instead of the view_dir_fresnel (which is the one that models the TIR)
        // though because otherwise it seems to break, not sure why. The handling of fresnel effects when light is coming
        // from below the specular (or coat lobe) lobe isn't perfect yet
        light_dir_fresnel *= (1.0f - bsdf_context.material.diffuse_transmission);
        layer_below_attenuation *= ColorRGB32F(1.0f) - light_dir_fresnel;

        // Also, when light reflects off of the layer below the specular layer, some of that reflected light
        // will hit total internal reflection against the specular/[coat or air] interface. This means that only
        // the part of light that does not hit total internal reflection actually reaches the viewer.
        // 
        // That's why we're computing another fresnel term here to account for that. And additional note:
        // computing that fresnel with the direction reflected from the base layer or with the viewer direction
        // is the same, Fresnel is symmetrical. But because we don't have the exact direction reflected from the
        // base layer, we're using the view direction instead
        ColorRGB32F view_dir_fresnel = principled_specular_fresnel(bsdf_context.material, relative_ior, hippt::abs(local_view_direction.z));
        layer_below_attenuation *= ColorRGB32F(1.0f) - view_dir_fresnel;

        // Taking into account the total internal reflection inside the specular layer 
        // (bouncing on the base diffuse layer). We're using the luminance of the fresnel here because
        // the specular layer may have thin film interference which colors the fresnel but
        // we're going to assume that the fresnel is non-colored and thus we just take the luminance
        layer_below_attenuation *= principled_specular_compute_darkening(bsdf_context.material, relative_ior, view_dir_fresnel.luminance());

        // If the specular layer has 0 weight, we should not get any light absorption.
        // But if the specular layer has 1 weight, we should get the full absorption that we
        // computed in 'layer_below_attenuation' so we're lerping between no absorption
        // and full absorption based on the material specular weight.
        layer_below_attenuation = hippt::lerp(ColorRGB32F(1.0f), layer_below_attenuation, bsdf_context.material.specular);

        layers_throughput *= layer_below_attenuation;

        out_cumulative_pdf += specular_pdf * specular_proba;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_specular_layer' (no throughput update).
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_specular_layer(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction,
    const float3& local_half_vector, const float3& shading_normal,
    float incident_medium_ior, float specular_weight, bool refracting, float specular_proba)
{
    // To even attempt the evaluation of the specular lobe, we need the specular 
weight to be non-zero
    // 
    // We also need the view and light direction to be above the normal hemisphere because the specular layer
    // is a BRDF: reflections only.
    //
    // However, we may still want to compute the layer throughput of the specular layer if we're given an
    // incident light direction that comes from the diffuse transmission lobe: such a direction has to go through
    // the specular layer first before going through the diffuse transmission lobe
    // The microfacet BRDF will actually evaluate to 0 but the layer throughput will attenuate some light
    //
    // This applies to diffuse transmission but doesn't apply to glass though (for example) because the glass layer
    // isn't "below" the specular layer, it's "adjacent" to it.
    if (specular_weight > 0.0f && ((local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f) || (refracting && bsdf_context.incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_TRANSMISSION_LOBE)))
    {
        float relative_ior = principled_specular_relative_ior(bsdf_context.material, incident_medium_ior);
        
        float specular_pdf = principled_specular_pdf(render_data, bsdf_context, relative_ior, local_view_direction, local_to_light_direction, local_half_vector);

        return specular_pdf * specular_proba;
    }

    return 0.0f;
}

/**
 * Evaluates the diffuse lobe (bottom of the glossy base, nothing below it).
 *
 * NOTE(review): 'incident_ior' is not read here ('principled_diffuse_eval'
 * doesn't take it) — presumably kept for signature symmetry; confirm.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_diffuse_layer(const HIPRTRenderData& render_data, float incident_ior, const DeviceUnpackedEffectiveMaterial& material,
    const float3& local_view_direction, const float3 local_to_light_direction, float diffuse_weight, float diffuse_proba, ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    if (diffuse_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f)
    {
        float diffuse_pdf;
        ColorRGB32F contribution = principled_diffuse_eval(material, local_view_direction, local_to_light_direction, diffuse_pdf);
        contribution *= diffuse_weight;
        contribution *= layers_throughput;

        // Nothing below the diffuse layer so we don't have a layer throughput
        // attenuation here

        out_cumulative_pdf += diffuse_pdf * diffuse_proba;

        return contribution;
    }

    return ColorRGB32F(0.0f);
}

/**
 * PDF-only counterpart of 'internal_eval_diffuse_layer'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_diffuse_layer(const HIPRTRenderData& render_data, float incident_ior, const DeviceUnpackedEffectiveMaterial& material,
    const float3& local_view_direction, const float3 local_to_light_direction, float diffuse_weight, float diffuse_proba)
{
    if (diffuse_weight > 0.0f && local_view_direction.z > 0.0f && local_to_light_direction.z > 0.0f)
    {
        float diffuse_pdf = principled_diffuse_pdf(material, local_view_direction, local_to_light_direction);

        return diffuse_pdf * diffuse_proba;
    }

    return 0.0f;
}

/**
 * The "glossy base" is the combination of a specular GGX layer
 * on top of a diffuse BRDF.
 *
 * Evaluates both components, accumulating their PDFs into 'out_cumulative_pdf',
 * then divides by the glossy-base energy compensation term.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F internal_eval_glossy_base(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,
    const float3& local_view_direction_rotated, const float3 local_to_light_direction_rotated, const float3& local_half_vector_rotated,
    const float3& shading_normal,
    float incident_medium_ior, float diffuse_weight, float specular_weight, bool refracting,
    float diffuse_proba_norm, float specular_proba_norm,
    ColorRGB32F& layers_throughput, float& out_cumulative_pdf)
{
    ColorRGB32F glossy_base_contribution = ColorRGB32F(0.0f);

    // Evaluating the two components of the glossy base
    // (specular first: it attenuates 'layers_throughput' for the diffuse lobe below it)
    glossy_base_contribution += internal_eval_specular_layer(render_data, bsdf_context,
        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, shading_normal,
        incident_medium_ior, specular_weight, refracting, specular_proba_norm, layers_throughput, out_cumulative_pdf);
    glossy_base_contribution += internal_eval_diffuse_layer(render_data, incident_medium_ior, bsdf_context.material, local_view_direction, local_to_light_direction, diffuse_weight, diffuse_proba_norm, layers_throughput, out_cumulative_pdf);

    float glossy_base_energy_compensation = get_principled_energy_compensation_glossy_base(render_data, bsdf_context.material, incident_medium_ior, local_view_direction.z, bsdf_context.current_bounce);
    return glossy_base_contribution / glossy_base_energy_compensation;
}

/**
 * PDF-only counterpart of 'internal_eval_glossy_base': sum of the specular
 * and diffuse lobe PDFs, each weighted by its sampling probability.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE float internal_pdf_glossy_base(const HIPRTRenderData& render_data, BSDFContext& bsdf_context,
    const float3& local_view_direction, const float3 local_to_light_direction, const float3& local_half_vector,
    const float3& local_view_direction_rotated, const float3 local_to_light_direction_rotated, const float3& local_half_vector_rotated,
    const float3& shading_normal,
    float incident_medium_ior, float diffuse_weight, float specular_weight, bool refracting,
    float diffuse_proba_norm, float specular_proba_norm)
{
    float pdf = 0.0f;

    // Evaluating the two components of the glossy base
    pdf += internal_pdf_specular_layer(render_data, bsdf_context,
        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, shading_normal,
        incident_medium_ior, specular_weight, refracting, specular_proba_norm);
    pdf += internal_pdf_diffuse_layer(render_data, incident_medium_ior, bsdf_context.material, local_view_direction, local_to_light_direction, diffuse_weight, diffuse_proba_norm);

    return pdf;
}

/**
 * Computes the lobes weights for the principled BSDF
 */
HIPRT_HOST_DEVICE HIPRT_INLINE void principled_bsdf_get_lobes_weights(const DeviceUnpackedEffectiveMaterial& material,
          
                                                            bool outside_object,
                                                                      float& out_coat_weight, float& out_sheen_weight,
                                                                      float& out_metal_1_weight, float& out_metal_2_weight,
                                                                      float& out_specular_weight,
                                                                      float& out_diffuse_weight, 
                                                                      float& out_glass_weight, float& out_diffuse_transmission_weight)
{
    // Linear blending weights for the lobes
    // 
    // Every time we multiply by "outside_object", it is because we want to disable
    // the lobe if we're inside the object
    //
    // The layering follows the one of the principled BSDF of blender:
    // [10] https://docs.blender.org/manual/fr/dev/render/shader_nodes/shader/principled.html

    out_coat_weight = material.coat * outside_object;
    out_sheen_weight = material.sheen * outside_object;
    // Metal 1 and metal 2 are the two metallic lobes for the two roughnesses.
    // Having 2 roughnesses (linearly blended together) can enable interesting effects
    // that cannot be achieved with a single GGX metal lobe.
    // 
    // See [Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017],
    // "Double Specular" for more details
    float metallic = material.metallic;
    out_metal_1_weight = metallic * outside_object;
    out_metal_2_weight = metallic * outside_object;

    float second_roughness_weight = material.second_roughness_weight;
    out_metal_1_weight = hippt::lerp(out_metal_1_weight, 0.0f, second_roughness_weight);
    out_metal_2_weight = hippt::lerp(0.0f, out_metal_2_weight, second_roughness_weight);

    float specular_transmission = material.specular_transmission;
    float diffuse_transmission = material.diffuse_transmission;
    // Inside the object, only the transmissive lobes (glass / diffuse transmission) remain
    out_glass_weight = !outside_object ? (1.0f - diffuse_transmission) : (1.0f - metallic) * (1.0f - diffuse_transmission) * specular_transmission;
    out_diffuse_transmission_weight = !outside_object ? diffuse_transmission : (1.0f - metallic) * diffuse_transmission;

    out_specular_weight = (1.0f - metallic) * (1.0f - specular_transmission * (1.0f - diffuse_transmission)) * material.specular * outside_object;
    out_diffuse_weight = (1.0f - metallic) * (1.0f - specular_transmission) * (1.0f - diffuse_transmission) * outside_object;
}

/**
 * Converts the lobe weights into normalized sampling probabilities, optionally
 * reweighting the specular/coat lobes by their fresnel (kernel options
 * 'PrincipledBSDFSampleGlossyBasedOnFresnel' / 'PrincipledBSDFSampleCoatBasedOnFresnel').
 *
 * NOTE(review): if every (possibly fresnel-adjusted) weight is 0,
 * 'normalize_factor' is a division by zero and the output probabilities are
 * not finite — confirm callers guarantee at least one non-zero weight.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE void principled_bsdf_get_lobes_sampling_proba(const HIPRTRenderData& render_data,
    const DeviceUnpackedEffectiveMaterial& material,
    float NoV,
    float incident_medium_ior,

    float coat_weight, float sheen_weight, float metal_1_weight, float metal_2_weight,
    float specular_weight, float diffuse_weight, float glass_weight, float diffuse_transmission_weight,

    float& out_coat_sampling_proba, float& out_sheen_sampling_proba,
    float& out_metal_1_sampling_proba, float& out_metal_2_sampling_proba,
    float& out_specular_sampling_proba, float& out_diffuse_sampling_proba,
    float& out_glass_sampling_proba, float& out_diffuse_transmission_sampling_proba)
{
#if PrincipledBSDFSampleGlossyBasedOnFresnel == KERNEL_OPTION_TRUE
    // Adjusting the probability of sampling the diffuse or specular lobe based on the
    // fresnel of the specular lobe
    if (material.specular > 0.0f)
    {
        float specular_relative_ior = principled_specular_relative_ior(material, incident_medium_ior);
        float specular_fresnel = full_fresnel_dielectric(NoV, specular_relative_ior);
        float specular_fresnel_sampling_weight = specular_fresnel * material.specular;

        // The specular weight gets affected
        specular_weight *= specular_fresnel_sampling_weight;
        // And everything that is below the specular also gets affected
        diffuse_weight *= 1.0f - specular_fresnel_sampling_weight;
        diffuse_transmission_weight *= 1.0f - specular_fresnel_sampling_weight;
    }
#endif

#if PrincipledBSDFSampleCoatBasedOnFresnel == KERNEL_OPTION_TRUE
    if (material.coat > 0.0f)
    {
        float coat_fresnel = full_fresnel_dielectric(NoV, material.coat_ior / incident_medium_ior);
        float coat_fresnel_sampling_weight = coat_fresnel * material.coat;

        // The coat weight gets affected
        coat_weight *= coat_fresnel_sampling_weight;
        // And everything that is below the coat also gets affected
        sheen_weight *= 1.0f - coat_fresnel_sampling_weight;
        metal_1_weight *= 1.0f - coat_fresnel_sampling_weight;
        metal_2_weight *= 1.0f - coat_fresnel_sampling_weight;
        specular_weight *= 1.0f - coat_fresnel_sampling_weight;
        diffuse_weight *= 1.0f - coat_fresnel_sampling_weight;
        glass_weight *= 1.0f - coat_fresnel_sampling_weight;
        diffuse_transmission_weight *= 1.0f - coat_fresnel_sampling_weight;
    }
#endif

    float normalize_factor = 1.0f / (coat_weight + sheen_weight
                                     + metal_1_weight + metal_2_weight
                                     + specular_weight + diffuse_weight
                                     + glass_weight + diffuse_transmission_weight);

    out_coat_sampling_proba = coat_weight * normalize_factor;
    out_sheen_sampling_proba = sheen_weight * normalize_factor;
    out_metal_1_sampling_proba = metal_1_weight * normalize_factor;
    out_metal_2_sampling_proba = metal_2_weight * normalize_factor;
    out_specular_sampling_proba = specular_weight * normalize_factor;
    out_diffuse_sampling_proba = diffuse_weight * normalize_factor;
    out_glass_sampling_proba = glass_weight * normalize_factor;
    out_diffuse_transmission_sampling_proba = diffuse_transmission_weight * normalize_factor;
}

/**
 * Full principled BSDF evaluation: sums all lobe contributions (attenuated by
 * the layers above them through 'layers_throughput') and accumulates the
 * combined PDF into 'pdf'.
 */
HIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_bsdf_eval(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float& pdf)
{
    pdf = 0.0f;

    // Only the glass lobe is considered when evaluating
    // the BSDF from inside the object so we're going to use that
    // 'outside_object' flag to nullify the other lobes if we're
    // inside the object
    //
    // Note that we're always outside of thin materials, they have no volume interior
    bool outside_object = !bsdf_context.volume_state.inside_material;
    bool refracting = hippt::dot(bsdf_context.shading_normal, bsdf_context.to_light_direction) < 0.0f && outside_object;

    float3 T, B;
    build_ONB(bsdf_context.shading_normal, T, B);
    float3 local_view_direction = world_to_local_frame(T, B, bsdf_context.shading_normal, bsdf_context.view_direction);
    float3 local_to_light_direction = world_to_local_frame(T, B, bsdf_context.shading_normal, bsdf_context.to_light_direction);
    float3 local_half_vector = hippt::normalize(local_view_direction + local_to_light_direction);

    // Rotated ONB for the anisotropic GGX evaluation (metallic/glass lobes for example)
    float3 TR, BR;
    build_rotated_ONB(bsdf_context.shading_normal, TR, BR, bsdf_context.material.anisotropy_rotation * M_PI);
    float3 local_view_direction_rotated = world_to_local_frame(TR, BR, bsdf_context.shading_normal, bsdf_context.view_direction);
    float3 local_to_light_direction_rotated = world_to_local_frame(TR, BR, bsdf_context.shading_normal, bsdf_context.to_light_direction);
    float3 local_half_vector_rotated = hippt::normalize(local_view_direction_rotated + local_to_light_direction_rotated);

    float incident_medium_ior = bsdf_context.volume_state.incident_mat_index == /* air */ NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 
1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n\n    float coat_weight, sheen_weight, metal_1_weight, metal_2_weight;\n    float specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight;\n    principled_bsdf_get_lobes_weights(bsdf_context.material,\n        outside_object,\n        coat_weight, sheen_weight, metal_1_weight, metal_2_weight,\n        specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight);\n\n    float coat_proba, sheen_proba, metal_1_proba, metal_2_proba;\n    float specular_proba, diffuse_proba, glass_proba, diffuse_transmission_proba;\n    principled_bsdf_get_lobes_sampling_proba(render_data, bsdf_context.material, local_view_direction.z, incident_medium_ior,\n        coat_weight, sheen_weight, metal_1_weight, metal_2_weight,\n        specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight,\n\n        coat_proba, sheen_proba, metal_1_proba, metal_2_proba,\n        specular_proba, diffuse_proba, glass_proba, diffuse_transmission_proba);\n\n\n    // Keeps track of the remaining light's energy as we traverse layers\n    ColorRGB32F layers_throughput = ColorRGB32F(1.0f);\n    ColorRGB32F final_color = ColorRGB32F(0.0f);\n\n    // In the 'internal_eval_coat_layer' function calls below, we're passing\n    // 'weight * !refracting' so that lobes that do not allow refractions\n    // (which is pretty much all of them except glass) do no get evaluated\n    // (because their weight becomes 0)\n    final_color += internal_eval_coat_layer(render_data, bsdf_context,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        incident_medium_ior, refracting, coat_weight, coat_proba, layers_throughput, pdf);\n    final_color += internal_eval_sheen_layer(render_data, bsdf_context.material,\n        local_view_direction, local_to_light_direction,\n        refracting, sheen_weight, sheen_proba, layers_throughput, pdf);\n    
final_color += internal_eval_metal_layer(render_data, bsdf_context, bsdf_context.material.roughness, bsdf_context.material.anisotropy,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, incident_medium_ior,\n        metal_1_weight * !refracting, metal_1_proba, layers_throughput, pdf);\n    final_color += internal_eval_metal_layer(render_data, bsdf_context, bsdf_context.material.second_roughness, bsdf_context.material.anisotropy,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, incident_medium_ior,\n        metal_2_weight * !refracting, metal_2_proba, layers_throughput, pdf);\n\n    // Careful here to evaluate the glass layer before the glossy\n    // base otherwise, layers_throughput is going to be modified\n    // by the specular layer evaluation (in the glossy base) to \n    // take the fresnel of the specular layer into account. \n    // But we don't want that for the glass layer. \n    // The glass layer isn't below the specular layer, it's \"next to\"\n    // the specular layer so we don't want the specular-layer-fresnel-attenuation\n    // there\n    final_color += internal_eval_glass_layer(render_data, bsdf_context, local_view_direction_rotated, local_to_light_direction_rotated, glass_weight, glass_proba, layers_throughput, pdf);\n    final_color += internal_eval_glossy_base(render_data, bsdf_context,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, bsdf_context.shading_normal,\n        incident_medium_ior, diffuse_weight * !refracting, specular_weight, refracting,\n        diffuse_proba, specular_proba,\n        layers_throughput, pdf);\n    final_color += internal_eval_diffuse_transmission_layer(render_data, bsdf_context.material,\n        bsdf_context.volume_state, bsdf_context.update_ray_volume_state,\n        
local_view_direction, local_to_light_direction, diffuse_transmission_weight, diffuse_transmission_proba, layers_throughput, pdf);\n\n    // The clearcoat compensation is done here and not in the clearcoat function\n    // because the clearcoat sits on top of everything else. This means that the clearcoat\n    // closure contains the full BSDF below. So the full BSDF below + the clearcoat (= the whole BSDF actually)\n    // should be compensated, not just the clearcoat lobe. So that's why we're doing\n    // it here, after the full BSDF evaluation so that everything gets compensated\n    final_color /= get_principled_energy_compensation_clearcoat_lobe(render_data, bsdf_context.material, incident_medium_ior, local_view_direction.z, bsdf_context.current_bounce);\n\n    // TODO compare CPU rendering with and without\n    sanity_check</* CPUOnly */ true>(render_data, final_color, 0, 0);\n    return final_color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_bsdf_pdf(const HIPRTRenderData& render_data, BSDFContext& bsdf_context)\n{\n    float pdf = 0.0f;\n\n    // Only the glass lobe is considered when evaluating\n    // the BSDF from inside the object so we're going to use that\n    // 'outside_object' flag to nullify the other lobes if we're\n    // inside the object\n    //\n    // Note that we're always outside of thin materials, they have no volume interior\n    bool outside_object = !bsdf_context.volume_state.inside_material;\n    bool refracting = hippt::dot(bsdf_context.shading_normal, bsdf_context.to_light_direction) < 0.0f && outside_object;\n\n    float3 T, B;\n    build_ONB(bsdf_context.shading_normal, T, B);\n    float3 local_view_direction = world_to_local_frame(T, B, bsdf_context.shading_normal, bsdf_context.view_direction);\n    float3 local_to_light_direction = world_to_local_frame(T, B, bsdf_context.shading_normal, bsdf_context.to_light_direction);\n    float3 local_half_vector = hippt::normalize(local_view_direction + 
local_to_light_direction);\n\n    // Rotated ONB for the anisotropic GGX evaluation (metallic/glass lobes for example)\n    float3 TR, BR;\n    build_rotated_ONB(bsdf_context.shading_normal, TR, BR, bsdf_context.material.anisotropy_rotation * M_PI);\n    float3 local_view_direction_rotated = world_to_local_frame(TR, BR, bsdf_context.shading_normal, bsdf_context.view_direction);\n    float3 local_to_light_direction_rotated = world_to_local_frame(TR, BR, bsdf_context.shading_normal, bsdf_context.to_light_direction);\n    float3 local_half_vector_rotated = hippt::normalize(local_view_direction_rotated + local_to_light_direction_rotated);\n\n    float incident_medium_ior = bsdf_context.volume_state.incident_mat_index == /* air */ NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n\n    float coat_weight, sheen_weight, metal_1_weight, metal_2_weight;\n    float specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight;\n    principled_bsdf_get_lobes_weights(bsdf_context.material,\n        outside_object,\n        coat_weight, sheen_weight, metal_1_weight, metal_2_weight,\n        specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight);\n\n    float coat_proba, sheen_proba, metal_1_proba, metal_2_proba;\n    float specular_proba, diffuse_proba, glass_proba, diffuse_transmission_proba;\n    principled_bsdf_get_lobes_sampling_proba(render_data, bsdf_context.material, local_view_direction.z, incident_medium_ior,\n        coat_weight, sheen_weight, metal_1_weight, metal_2_weight,\n        specular_weight, diffuse_weight, glass_weight, diffuse_transmission_weight,\n\n        coat_proba, sheen_proba, metal_1_proba, metal_2_proba,\n        specular_proba, diffuse_proba, glass_proba, diffuse_transmission_proba);\n\n    // In the 'internal_eval_coat_layer' function calls below, we're passing\n    // 'weight * !refracting' so that lobes that do 
not allow refractions\n    // (which is pretty much all of them except glass) do not get evaluated\n    // (because their weight becomes 0)\n    pdf += internal_pdf_coat_layer(render_data, bsdf_context,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        incident_medium_ior, refracting, coat_weight, coat_proba);\n    pdf += internal_pdf_sheen_layer(render_data, bsdf_context.material, local_view_direction, local_to_light_direction, refracting, sheen_weight, sheen_proba);\n    pdf += internal_pdf_metal_layer(render_data, bsdf_context, bsdf_context.material.roughness, bsdf_context.material.anisotropy,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, incident_medium_ior,\n        metal_1_weight * !refracting, metal_1_proba);\n    pdf += internal_pdf_metal_layer(render_data, bsdf_context, bsdf_context.material.second_roughness, bsdf_context.material.anisotropy,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, incident_medium_ior,\n        metal_2_weight * !refracting, metal_2_proba);\n\n    // Careful here to evaluate the glass layer before the glossy\n    // base otherwise, layers_throughput is going to be modified\n    // by the specular layer evaluation (in the glossy base) to \n    // take the fresnel of the specular layer into account. \n    // But we don't want that for the glass layer. 
\n    // The glass layer isn't below the specular layer, it's \"next to\"\n    // the specular layer so we don't want the specular-layer-fresnel-attenuation\n    // there\n    pdf += internal_pdf_glass_layer(render_data, bsdf_context, \n        local_view_direction_rotated, local_to_light_direction_rotated, \n        glass_weight, glass_proba);\n    pdf += internal_pdf_glossy_base(render_data, bsdf_context,\n        local_view_direction, local_to_light_direction, local_half_vector,\n        local_view_direction_rotated, local_to_light_direction_rotated, local_half_vector_rotated, bsdf_context.shading_normal,\n        incident_medium_ior, diffuse_weight * !refracting, specular_weight, refracting,\n        diffuse_proba, specular_proba);\n    pdf += internal_pdf_diffuse_transmission_layer(local_view_direction, local_to_light_direction, diffuse_transmission_weight, diffuse_transmission_proba);\n\n    return pdf;\n}\n\n/**\n * If sampleDirectionOnly is 'true', this function samples only the BSDF without \n * evaluating the contribution or the PDF of the BSDF. 
This function will then always return\n * ColorRGB32F(0.0f) and the 'pdf' out parameter will always be set to 0.0f\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F principled_bsdf_sample(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float3& output_direction, float& pdf, Xorshift32Generator& random_number_generator)\n{\n    pdf = 0.0f;\n\n    // Computing the weights for sampling the lobes\n    bool is_outside_object = !bsdf_context.volume_state.inside_material;\n\n    float coat_sampling_weight;\n    float sheen_sampling_weight;\n    float metal_1_sampling_weight;\n    float metal_2_sampling_weight;\n    float specular_sampling_weight;\n    float diffuse_sampling_weight;\n    float glass_sampling_weight;\n    float diffuse_transmission_weight;\n    principled_bsdf_get_lobes_weights(bsdf_context.material, is_outside_object,\n        coat_sampling_weight, sheen_sampling_weight, \n        metal_1_sampling_weight, metal_2_sampling_weight, \n        specular_sampling_weight, diffuse_sampling_weight, \n        glass_sampling_weight, diffuse_transmission_weight);\n\n    float coat_sampling_proba, sheen_sampling_proba, metal_1_sampling_proba;\n    float metal_2_sampling_proba, specular_sampling_proba, diffuse_sampling_proba;\n    float glass_sampling_proba, diffuse_transmission_sampling_proba;\n    float incident_medium_ior = bsdf_context.volume_state.incident_mat_index == /* air */ NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX ? 
1.0f : render_data.buffers.materials_buffer.get_ior(bsdf_context.volume_state.incident_mat_index);\n    principled_bsdf_get_lobes_sampling_proba(render_data,\n        bsdf_context.material, hippt::dot(bsdf_context.view_direction, bsdf_context.shading_normal), incident_medium_ior,\n        coat_sampling_weight, sheen_sampling_weight, metal_1_sampling_weight, metal_2_sampling_weight,\n        specular_sampling_weight, diffuse_sampling_weight, glass_sampling_weight, diffuse_transmission_weight,\n\n        coat_sampling_proba, sheen_sampling_proba, metal_1_sampling_proba, metal_2_sampling_proba,\n        specular_sampling_proba, diffuse_sampling_proba, glass_sampling_proba, diffuse_transmission_sampling_proba);\n\n    // Not using a float[] array here because array[] are super poorly handled \n    // in general by the HIP compiler on AMD\n    float cdf0 = coat_sampling_proba;\n    float cdf1 = cdf0 + sheen_sampling_proba;\n    float cdf2 = cdf1 + metal_1_sampling_proba;\n    float cdf3 = cdf2 + metal_2_sampling_proba;\n    float cdf4 = cdf3 + specular_sampling_proba;\n    float cdf5 = cdf4 + diffuse_sampling_proba;\n    float cdf6 = cdf5 + diffuse_transmission_sampling_proba;\n    // The last cdf[] is implicitly 1.0f so we don't need to include it\n\n    float rand_1 = random_number_generator();\n    bool sampling_diffuse_transmission_lobe = rand_1 > cdf5 && rand_1 < cdf6;\n    bool sampling_glass_lobe = rand_1 > cdf6;\n\n    if (bsdf_context.update_ray_volume_state)\n        if (!sampling_glass_lobe && !sampling_diffuse_transmission_lobe)\n            // We're going to sample a reflective lobe so we're popping the stack\n            //\n            // Note that we may also reflect from glass but the popping for that is done in glass_sample()\n            bsdf_context.volume_state.interior_stack.pop(false);\n\n    // Rotated ONB for the anisotropic GGX evaluation\n    float3 TR, BR;\n    build_rotated_ONB(bsdf_context.shading_normal, TR, BR, 
bsdf_context.material.anisotropy_rotation * M_PI);\n    float3 local_view_direction_rotated = world_to_local_frame(TR, BR, bsdf_context.shading_normal, bsdf_context.view_direction);\n\n    if (rand_1 < cdf0)\n    {\n        // Sampling the coat lobe\n\n        float3 TR_coat, BR_coat;\n        build_rotated_ONB(bsdf_context.shading_normal, TR_coat, BR_coat, bsdf_context.material.coat_anisotropy_rotation * M_PI);\n        float3 local_view_direction_rotated_coat = world_to_local_frame(TR_coat, BR_coat, bsdf_context.shading_normal, bsdf_context.view_direction);\n\n        // Giving some information about what the BSDF sampled to the caller\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE;\n        output_direction = local_to_world_frame(TR_coat, BR_coat, bsdf_context.shading_normal, principled_coat_sample(render_data, bsdf_context, local_view_direction_rotated_coat, random_number_generator));\n    }\n    else if (rand_1 < cdf1)\n    {\n        // Sampling the sheen lobe\n\n        float3 T, B;\n        build_ONB(bsdf_context.shading_normal, T, B);\n        float3 local_view_direction = world_to_local_frame(T, B, bsdf_context.shading_normal, bsdf_context.view_direction);\n\n        output_direction = local_to_world_frame(T, B, bsdf_context.shading_normal, principled_sheen_sample(render_data, bsdf_context.material, local_view_direction, bsdf_context.shading_normal, random_number_generator));\n    }\n    else if (rand_1 < cdf2)\n    {\n        // First metallic lobe sample\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_FIRST_METAL_LOBE;\n        output_direction = local_to_world_frame(TR, BR, bsdf_context.shading_normal, principled_metallic_sample(render_data, bsdf_context, bsdf_context.material.roughness, bsdf_context.material.anisotropy, local_view_direction_rotated, random_number_generator));\n    }\n    else if (rand_1 < cdf3)\n    {\n        // Second metallic 
lobe sample\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SECOND_METAL_LOBE;\n        output_direction = local_to_world_frame(TR, BR, bsdf_context.shading_normal, principled_metallic_sample(render_data, bsdf_context, bsdf_context.material.second_roughness, bsdf_context.material.anisotropy, local_view_direction_rotated, random_number_generator));\n    }\n    else if (rand_1 < cdf4)\n    {\n        // Sampling the specular lobe\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SPECULAR_LOBE;\n        output_direction = local_to_world_frame(TR, BR, bsdf_context.shading_normal, principled_specular_sample(render_data, bsdf_context, bsdf_context.material.roughness, bsdf_context.material.anisotropy, local_view_direction_rotated, random_number_generator));\n    }\n    else if (rand_1 < cdf5)\n    {\n        // No call to local_to_world_frame() since the sample diffuse functions\n        // already returns in world space around the given normal\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_LOBE;\n        output_direction = principled_diffuse_sample(bsdf_context.shading_normal, random_number_generator);\n    }\n    else if (rand_1 < cdf6)\n    {\n        // Diffuse transmission lobe\n        bsdf_context.incident_light_info = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_TRANSMISSION_LOBE;\n        output_direction = principled_diffuse_transmission_sample(bsdf_context.shading_normal, random_number_generator);\n    }\n    else\n        // When sampling the glass lobe, if we're reflecting off the glass, we're going to have to pop the stack.\n        // This is handled inside glass_sample because we cannot know from here if we refracted or reflected\n        output_direction = local_to_world_frame(TR, BR, bsdf_context.shading_normal, principled_glass_sample(render_data, bsdf_context, local_view_direction_rotated, 
random_number_generator));\n\n    if (hippt::dot(output_direction, bsdf_context.geometric_normal) < 0.0f && !sampling_glass_lobe && !sampling_diffuse_transmission_lobe)\n        // It can happen that the light direction sampled is below the geometric surface.\n        // \n        // We return 0.0 in this case if we didn't sample the glass lobe\n        // because no lobe other than the glass lobe (or diffuse transmission) allows refractions\n        return ColorRGB32F(0.0f);\n\n    // Just copying the context to add the incident light info\n    bsdf_context.to_light_direction = output_direction;\n\n    if constexpr (sampleDirectionOnly)\n    {\n        pdf = 0.0f;\n\n        return ColorRGB32F(0.0f);\n    }\n    else\n        return principled_bsdf_eval(render_data, bsdf_context, pdf);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/PrincipledEnergyCompensation.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_PRINCIPLED_ENERGY_COMPENSATION_H\n#define DEVICE_PRINCIPLED_ENERGY_COMPENSATION_H\n\n#include \"Device/includes/BSDFs/BSDFContext.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float principled_specular_relative_ior(const DeviceUnpackedEffectiveMaterial& material, float incident_medium_ior);\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_principled_energy_compensation_glossy_base(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, float incident_medium_ior, float NoV, int current_bounce)\n{\n    bool energy_compensation_disabled = !material.do_specular_energy_compensation;\n    bool roughness_low_enough = material.roughness < render_data.bsdfs_data.energy_compensation_roughness_threshold;\n    // If all we have for the glossy base is the diffuse layer (i.e. no specular\n    // layer because the specular weight is low, then we don't need energy compensation)\n    bool no_specular_layer = material.specular < 1.0e-3f;\n    bool max_bounce_reached = current_bounce > render_data.bsdfs_data.glossy_base_energy_compensation_max_bounce && render_data.bsdfs_data.glossy_base_energy_compensation_max_bounce > -1;\n    bool invalid_view_direction = NoV < 0.0f;\n    if (energy_compensation_disabled || roughness_low_enough || no_specular_layer || max_bounce_reached || invalid_view_direction)\n        return 1.0f;\n\n    float ms_compensation = 1.0f;\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoSpecularEnergyCompensation == KERNEL_OPTION_TRUE\n    int3 texture_dims = make_int3(GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_COS_THETA_O, GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_ROUGHNESS, GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR);\n\n    float ior = material.ior;\n    float relative_ior = 
principled_specular_relative_ior(material, incident_medium_ior);\n    if (hippt::abs(relative_ior - 1.0f) < 1.0e-3f)\n        // If the relative ior is very close to 1.0f,\n        // adding some offset to avoid singularities at 1.0f which cause\n        // fireflies\n        relative_ior += 1.0e-3f;\n\n    // We're storing cos_theta_o^2.5 in the LUT so we're retrieving with\n    // root 2.5\n    float view_dir_remapped = pow(NoV, 1.0f / 2.5f);\n    // sqrt(sqrt(F0)) here because we're storing F0^4 in the LUT\n    float F0_remapped = sqrt(sqrt(F0_from_eta_t_and_relative_ior(ior, relative_ior)));\n\n    float3 uvw = make_float3(view_dir_remapped, material.roughness, F0_remapped);\n    float multiple_scattering_compensation = sample_texture_3D_rgb_32bits(render_data.bsdfs_data.glossy_dielectric_directional_albedo, texture_dims, uvw, render_data.bsdfs_data.use_hardware_tex_interpolation).r;\n\n    // Applying the compensation term for energy preservation\n    // If material.specular == 1, then we want the full energy compensation\n    // If material.specular == 0, then we only have the diffuse lobe and so we\n    // need no energy compensation at all and so we just divide by 1 to basically do nothing\n    ms_compensation = hippt::lerp(1.0f, multiple_scattering_compensation, material.specular);\n    // Multi scatter compensation is not tabulated to take thin film interference into account.\n    // That's because thin film interference completely modifies the fresnel term and the\n    // tabulated multi scatter compensation only accounts for the usual dielectric fresnel\n    // \n    // So we're progressively disabling ms compensation on the glossy base as the thin-film \n    // is more and more pronounced\n    ms_compensation = hippt::lerp(ms_compensation, 1.0f, material.thin_film);\n#endif\n\n    return ms_compensation;\n}\n\n/**\n * This function gives an approximation of the energy lost by the clearcoat layer\n * by assuming that whatever is under the clearcoat is 
lambertian (which *may*\n * obviously be a very rough approximation, depending on what's the BSDF below the clearcoat)\n * \n * This basically treats the clearcoat layer exactly the same as a specular/diffuse\n * (just like the \"glossy base\" of the principled BSDF) and so that's why we're using the LUTs\n * of the glossy base\n * \n * The approximation can be harsh but in reasonable scenarios (where we're clearcoating something \n * quite diffuse: which is usually the case because WHO CLEARCOATS A MIRROR?), it's actually quite good and \n * it's way better than nothing and cheap compared to the full on-the-fly integration that we\n * would have to do otherwise (or full interlayer-multiple-scattering simulation)\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_principled_energy_compensation_clearcoat_lobe(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, float incident_medium_ior, float NoV, int current_bounce)\n{\n    bool energy_compensation_disabled = !material.do_specular_energy_compensation;\n    // If we don't have a clearcoat, let's not compensate energy\n    bool no_coat_layer = material.coat < 1.0e-3f;\n    bool max_bounce_reached = current_bounce > render_data.bsdfs_data.clearcoat_energy_compensation_max_bounce && render_data.bsdfs_data.clearcoat_energy_compensation_max_bounce > -1;\n    bool invalid_view_direction = NoV < 0.0f;\n    if (energy_compensation_disabled || no_coat_layer || max_bounce_reached || invalid_view_direction)\n        return 1.0f;\n\n    float ms_compensation = 1.0f;\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoClearcoatEnergyCompensation == KERNEL_OPTION_TRUE\n    int3 texture_dims = make_int3(GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_COS_THETA_O, GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_ROUGHNESS, GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR);\n\n    if (hippt::abs(material.coat_ior / incident_medium_ior - 1.0f) < 1.0e-3f)\n        // 
If the relative ior is very close to 1.0f,\n        // adding some offset to avoid singularities which cause\n        // fireflies\n        incident_medium_ior += 1.0e-3f;\n\n    // We're storing cos_theta_o^2.5 in the LUT so we're retrieving with\n    // root 2.5\n    float view_dir_remapped = pow(NoV, 1.0f / 2.5f);\n    // sqrt(sqrt(F0)) here because we're storing F0^4 in the LUT\n    float F0_remapped = sqrt(sqrt(F0_from_eta(material.coat_ior, incident_medium_ior)));\n\n    float3 uvw = make_float3(view_dir_remapped, material.coat_roughness, F0_remapped);\n    float multiple_scattering_compensation = sample_texture_3D_rgb_32bits(render_data.bsdfs_data.glossy_dielectric_directional_albedo, texture_dims, uvw, render_data.bsdfs_data.use_hardware_tex_interpolation).r;\n\n    // Applying the compensation term for energy preservation\n    // If material.coat == 1, then we want the full energy compensation\n    // If material.coat == 0, then we only have the diffuse lobe and so we\n    // need no energy compensation at all and so we just divide by 1 to basically do nothing\n    //\n    // We're also disabling the compensation when the clearcoat is on top of a glass\n    // transmission lobe because the approximation here falls apart and can gain quite a bit\n    // of energy.\n    ms_compensation = hippt::lerp(1.0f, multiple_scattering_compensation, material.coat * (1.0f - material.specular_transmission));\n    // Multi scatter compensation is not tabulated to take thin film interference into account.\n    // That's because thin film interference completely modifies the fresnel term and the\n    // tabulated multi scatter compensation only accounts for the usual dielectric fresnel\n    // \n    // So we're progressively disabling ms compensation on the glossy base as the thin-film \n    // is more and more pronounced\n    ms_compensation = hippt::lerp(ms_compensation, 1.0f, material.thin_film);\n#endif\n\n    return ms_compensation;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/SheenLTC.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_BSDFS_SHEEN_LTC\n#define DEVICE_INCLUDES_BSDFS_SHEEN_LTC\n\n#include \"Device/includes/BSDFs/SheenLTCFittedParameters.h\"\n#include \"Device/includes/Texture.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n/**\n * Reference:\n * \n * [1] [Practical Multiple-Scattering Sheen Using Linearly Transformed Cosines] https://tizianzeltner.com/projects/Zeltner2022Practical/\n * [2] [Real-Time Polygonal-Light Shading with Linearly Transformed Cosines] https://eheitzresearch.wordpress.com/415-2/\n * [3] [Blender's Cycles Implementation] https://github.com/blender/cycles/blob/main/src/kernel/closure/bsdf_sheen.h\n */\n\nHIPRT_DEVICE HIPRT_INLINE float eval_ltc(const float3& to_light_direction_standard, const ColorRGB32F& AiBiRi)\n{\n\t// AiBiRi are the parameters of the LTC such that\n\t//        { Ai 0  Bi }\n\t// M^-1 = { 0  Ai 0  }\n\t//\t\t  { 0  0  1  }\n\t//\n\t// Bringing the to_light_direction into the \"LTC space\",\n\t// with identity transformation is thus done by multiplying\n\t// the direction by the M^-1 matrix\n\tfloat3 light_dir_original = make_float3(\n\t\tto_light_direction_standard.x * AiBiRi.r + to_light_direction_standard.z * AiBiRi.g,\n\t\tto_light_direction_standard.y * AiBiRi.r,\n\t\tto_light_direction_standard.z);\n\n\tfloat length = hippt::length(light_dir_original);\n\tlight_dir_original /= length; // Normalization\n\n\t// Determinant of M^-1\n\tfloat M_inv_determinant = AiBiRi.r * AiBiRi.r;\n\tfloat jacobian = M_inv_determinant / (length * length * length);\n\n\treturn light_dir_original.z * M_INV_PI * jacobian;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F read_LTC_parameters(const HIPRTRenderData& render_data, float roughness, float cos_theta)\n{\n\tconst void* 
ltc_parameters_texture_pointer;\n#ifdef __KERNELCC__\n\tltc_parameters_texture_pointer = &render_data.bsdfs_data.sheen_ltc_parameters_texture;\n#else\n\tltc_parameters_texture_pointer = render_data.bsdfs_data.sheen_ltc_parameters_texture;\n#endif\n\n\tfloat2 parameters_uv = make_float2(cos_theta, hippt::clamp(0.0f, 1.0f, roughness));\n\treturn sample_texture_rgb_32bits(ltc_parameters_texture_pointer, 0, false, parameters_uv, false);\n}\n\n/**\n * Returns the phi angle of a direction given in a canonical frame with Z up\n */\nHIPRT_DEVICE HIPRT_INLINE float get_phi(const float3& direction) \n{\n\tfloat p = atan2(direction.y, direction.x);\n\tif (p < 0.0f)\n\t\tp += M_TWO_PI;\n\n\treturn p;\n}\n\n/**\n * Rotates 'u' by 'angle' radians around 'axis'\n */\nHIPRT_DEVICE HIPRT_INLINE float3 rotate_vector(const float3& vec, const float3& axis, float angle) \n{\n\tfloat sin_angle = sin(angle);\n\tfloat cos_angle = cos(angle);\n\n\treturn vec * cos_angle + axis * hippt::dot(vec, axis) * (1.0f - cos_angle) + sin_angle * hippt::cross(axis, vec);\n}\n\nHIPRT_DEVICE float get_sheen_ltc_reflectance(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, const float3& local_view_direction)\n{\n\treturn read_LTC_parameters(render_data, material.sheen_roughness, local_view_direction.z).b;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sheen_ltc_eval(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, const float3& local_to_light_direction, const float3& local_view_direction, float& out_pdf, float& out_sheen_reflectance)\n{\n\tif (local_view_direction.z <= 0.0f || local_to_light_direction.z <= 0.0f)\n\t{\n\t\tout_pdf = 0.0f;\n\t\tif (local_view_direction.z > 0.0f)\n\t\t\tout_sheen_reflectance = get_sheen_ltc_reflectance(render_data, material, local_view_direction);\n\t\telse\n\t\t\tout_sheen_reflectance = 0.0f;\n\n\t\treturn ColorRGB32F(0.0f);\n\t}\n\n\t// The LTC needs to be evaluated in a Z-up coordinate frame with view 
direction aligned\n\t// with phi=0 (so no rotation on the X/Y plane).\n\t// \n\t// We're thus computing the phi angle and then rotating the to light direction backwards\n\t// on that phi angle so that the view direction is at phi=0.\n\tfloat phi = get_phi(local_view_direction);\n\n\t// Rotating the to light direction around z axis such that the view direction is aligned\n\t// with phi=0 (because we computed the rotation angle, phi, from the view direction)\n\tfloat3 to_light_standard_frame = rotate_vector(local_to_light_direction, make_float3(0.0f, 0.0f, 1.0f), -phi);\n\n\tColorRGB32F AiBiRi = read_LTC_parameters(render_data, material.sheen_roughness, local_view_direction.z);\n\tfloat Do = eval_ltc(to_light_standard_frame, AiBiRi);\n\n\tout_pdf = Do;\n\tout_sheen_reflectance = AiBiRi.b;\n\t// The cosine term is included in the LTC distribution but the renderer expects that\n\t// the cosine term isn't included in the BSDFs so we cancel it here.\n\treturn material.sheen_color * AiBiRi.b * Do / local_to_light_direction.z;\n}\n\nHIPRT_DEVICE HIPRT_INLINE float sheen_ltc_pdf(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, const float3& local_to_light_direction, const float3& local_view_direction)\n{\n\tif (local_view_direction.z <= 0.0f || local_to_light_direction.z <= 0.0f)\n\t\treturn 0.0f;\n\n\t// The LTC needs to be evaluated in a Z-up coordinate frame with view direction aligned\n\t// with phi=0 (so no rotation on the X/Y plane).\n\t// \n\t// We're thus computing the phi angle and then rotating the to light direction backwards\n\t// on that phi angle so that the view direction is at phi=0.\n\tfloat phi = get_phi(local_view_direction);\n\n\t// Rotating the to light direction around z axis such that the view direction is aligned\n\t// with phi=0 (because we computed the rotation angle, phi, from the view direction)\n\tfloat3 to_light_standard_frame = rotate_vector(local_to_light_direction, make_float3(0.0f, 0.0f, 1.0f), 
-phi);\n\n\tColorRGB32F AiBiRi = read_LTC_parameters(render_data, material.sheen_roughness, local_view_direction.z);\n\tfloat Do = eval_ltc(to_light_standard_frame, AiBiRi);\n\n\treturn Do;\n}\n\nHIPRT_DEVICE HIPRT_INLINE float3 sheen_ltc_sample(const HIPRTRenderData& render_data, const DeviceUnpackedEffectiveMaterial& material, const float3& local_view_direction, const float3& shading_normal, Xorshift32Generator& random_number_generator)\n{\n\t// Sampling a direction in the original space of the LTC\n\tfloat3 cosine_sample = cosine_weighted_sample_z_up_frame(random_number_generator);\n\n\tColorRGB32F AiBiRi = read_LTC_parameters(render_data, material.sheen_roughness, local_view_direction.z);\n\n\t// And then from the transformation matrix of the LTC, we're going to bring that\n\t// sampled direction back to the local space of the BSDF (shading/tangent space)\n\t// For that, we need to multiply that standard sampled direction by the matrix M\n\t// which is (M^-1)^-1 and we already have M^-1 from AiBiRi, we just need to invert it\n\t// and its inverse actually is\n\t//\n\t//      { 1/Ai       0      -Bi/Ai }\n\t//  M = { 0         1/Ai       0   }\n\t//\t\t{ 0          0         1   }\n\t// \n\n\tfloat Ai_inv = 1.0f / AiBiRi.r;\n\tfloat Bi = AiBiRi.g;\n\n\t// Creating the sampled direction in a space at phi=0\n\tfloat3 sampled_direction_ltc_space = hippt::normalize(make_float3(cosine_sample.x * Ai_inv - cosine_sample.z * Bi * Ai_inv, cosine_sample.y * Ai_inv, cosine_sample.z));\n\n\t// Bringing out of the phi=0 configuration by rotating\n\treturn rotate_vector(sampled_direction_ltc_space, make_float3(0.0f, 0.0f, 1.0f), get_phi(local_view_direction));\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/BSDFs/SheenLTCFittedParameters.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_BSDFS_SHEEN_LTC_PARAMETERS\n#define DEVICE_INCLUDES_BSDFS_SHEEN_LTC_PARAMETERS\n\n#ifndef __KERNELCC__\n// This file should not be included on the GPU\n\n /**\n  * Reference:\n  *\n  * [1]: [Practical Multiple-Scattering Sheen Using Linearly Transformed Cosines - Github] [https://github.com/tizian/ltc-sheen/blob/master/fitting/python/data/ltc_table_sheen_approx.cpp]\n  */\n\n#include \"HostDeviceCommon/Math.h\"\n\n#include <array>\n\n  /**\n   * Precomputed parameters for the fitted LTC (Linearly Transformed Cosine)\n   * distribution of an analytic approximation of the reference volumetric SGGX\n   * sheen layer.\n   *\n   * Sampled as [y][x] = float3(Ai, Bi, Ri) with:\n   *  y = cos(theta)\n   *  x = alpha\n   */\n\nstatic std::array<float3, 32*32> ltc_parameters_table_approximation = {\n\n        make_float3(0.10027f, -0.00000f, 0.33971f), make_float3(0.10760f, -0.00000f, 0.35542f), make_float3(0.11991f, 0.00001f, 0.30888f),\n        make_float3(0.13148f, 0.00001f, 0.23195f), make_float3(0.14227f, 0.00001f, 0.15949f), make_float3(0.15231f, -0.00000f, 0.10356f),\n        make_float3(0.16168f, -0.00000f, 0.06466f), make_float3(0.17044f, 0.00000f, 0.03925f), make_float3(0.17867f, 0.00001f, 0.02334f),\n        make_float3(0.18645f, 0.00000f, 0.01366f), make_float3(0.19382f, -0.00000f, 0.00790f), make_float3(0.20084f, -0.00001f, 0.00452f),\n        make_float3(0.20754f, 0.00001f, 0.00257f), make_float3(0.21395f, 0.00000f, 0.00145f), make_float3(0.22011f, 0.00000f, 0.00081f),\n        make_float3(0.22603f, -0.00000f, 0.00045f), make_float3(0.23174f, -0.00001f, 0.00025f), make_float3(0.23726f, 0.00000f, 0.00014f),\n        make_float3(0.24259f, -0.00001f, 0.00008f), make_float3(0.24777f, -0.00001f, 0.00004f), make_float3(0.25279f, -0.00001f, 0.00002f),\n        make_float3(0.25768f, 0.00001f, 
0.00001f), make_float3(0.26243f, 0.00001f, 0.00001f), make_float3(0.26707f, -0.00000f, 0.00000f),\n        make_float3(0.27159f, -0.00000f, 0.00000f), make_float3(0.27601f, -0.00000f, 0.00000f), make_float3(0.28033f, 0.00000f, 0.00000f),\n        make_float3(0.28456f, -0.00000f, 0.00000f), make_float3(0.28870f, -0.00001f, 0.00000f), make_float3(0.29276f, -0.00000f, 0.00000f),\n        make_float3(0.29676f, 0.00000f, 0.00000f), make_float3(0.30067f, -0.00001f, 0.00000f),\n\n\n        make_float3(0.10068f, -0.00013f, 0.33844f), make_float3(0.10802f, -0.00001f, 0.35438f), make_float3(0.12031f, -0.00000f, 0.30859f),\n        make_float3(0.13190f, 0.00001f, 0.23230f), make_float3(0.14269f, 0.00000f, 0.16016f), make_float3(0.15274f, 0.00001f, 0.10429f),\n        make_float3(0.16211f, -0.00000f, 0.06529f), make_float3(0.17088f, -0.00000f, 0.03975f), make_float3(0.17912f, -0.00000f, 0.02370f),\n        make_float3(0.18691f, -0.00001f, 0.01391f), make_float3(0.19429f, -0.00000f, 0.00807f), make_float3(0.20132f, -0.00000f, 0.00463f),\n        make_float3(0.20803f, 0.00001f, 0.00264f), make_float3(0.21445f, 0.00000f, 0.00149f), make_float3(0.22061f, 0.00000f, 0.00084f),\n        make_float3(0.22655f, -0.00000f, 0.00047f), make_float3(0.23226f, -0.00000f, 0.00026f), make_float3(0.23779f, -0.00001f, 0.00015f),\n        make_float3(0.24314f, -0.00002f, 0.00008f), make_float3(0.24832f, -0.00001f, 0.00004f), make_float3(0.25336f, -0.00002f, 0.00002f),\n        make_float3(0.25824f, -0.00003f, 0.00001f), make_float3(0.26301f, -0.00005f, 0.00001f), make_float3(0.26766f, -0.00008f, 0.00000f),\n        make_float3(0.27220f, -0.00012f, 0.00000f), make_float3(0.27665f, -0.00019f, 0.00000f), make_float3(0.28101f, -0.00026f, 0.00000f),\n        make_float3(0.28532f, -0.00041f, 0.00000f), make_float3(0.28960f, -0.00058f, 0.00000f), make_float3(0.29389f, -0.00080f, 0.00000f),\n        make_float3(0.29830f, -0.00096f, 0.00000f), make_float3(0.30309f, 0.00000f, 0.00000f),\n\n\n        
make_float3(0.09988f, -0.00743f, 0.33595f), make_float3(0.10928f, -0.00078f, 0.35135f), make_float3(0.12169f, -0.00015f, 0.30790f),\n        make_float3(0.13330f, -0.00006f, 0.23367f), make_float3(0.14412f, -0.00003f, 0.16253f), make_float3(0.15420f, -0.00004f, 0.10680f),\n        make_float3(0.16360f, -0.00002f, 0.06750f), make_float3(0.17240f, -0.00004f, 0.04148f), make_float3(0.18068f, -0.00003f, 0.02497f),\n        make_float3(0.18850f, -0.00004f, 0.01480f), make_float3(0.19591f, -0.00005f, 0.00867f), make_float3(0.20297f, -0.00005f, 0.00503f),\n        make_float3(0.20970f, -0.00008f, 0.00289f), make_float3(0.21616f, -0.00012f, 0.00165f), make_float3(0.22235f, -0.00015f, 0.00094f),\n        make_float3(0.22831f, -0.00020f, 0.00053f), make_float3(0.23407f, -0.00028f, 0.00030f), make_float3(0.23963f, -0.00039f, 0.00017f),\n        make_float3(0.24503f, -0.00056f, 0.00009f), make_float3(0.25028f, -0.00084f, 0.00005f), make_float3(0.25541f, -0.00124f, 0.00003f),\n        make_float3(0.26045f, -0.00184f, 0.00002f), make_float3(0.26545f, -0.00274f, 0.00001f), make_float3(0.27049f, -0.00410f, 0.00001f),\n        make_float3(0.27569f, -0.00612f, 0.00000f), make_float3(0.28127f, -0.00908f, 0.00000f), make_float3(0.28762f, -0.01335f, 0.00000f),\n        make_float3(0.29542f, -0.01917f, 0.00000f), make_float3(0.30593f, -0.02649f, 0.00000f), make_float3(0.32153f, -0.03397f, 0.00000f),\n        make_float3(0.34697f, -0.03669f, 0.00000f), make_float3(0.39704f, -0.00000f, 0.00000f),\n\n\n        make_float3(0.08375f, -0.07516f, 0.33643f), make_float3(0.10999f, -0.00776f, 0.34873f), make_float3(0.12392f, -0.00160f, 0.30771f),\n        make_float3(0.13571f, -0.00070f, 0.23660f), make_float3(0.14661f, -0.00044f, 0.16701f), make_float3(0.15676f, -0.00034f, 0.11147f),\n        make_float3(0.16621f, -0.00032f, 0.07157f), make_float3(0.17508f, -0.00029f, 0.04470f), make_float3(0.18341f, -0.00033f, 0.02736f),\n        make_float3(0.19128f, -0.00038f, 0.01648f), make_float3(0.19876f, 
-0.00045f, 0.00981f), make_float3(0.20587f, -0.00057f, 0.00579f),\n        make_float3(0.21267f, -0.00074f, 0.00339f), make_float3(0.21918f, -0.00096f, 0.00197f), make_float3(0.22544f, -0.00129f, 0.00114f),\n        make_float3(0.23150f, -0.00179f, 0.00066f), make_float3(0.23736f, -0.00249f, 0.00038f), make_float3(0.24308f, -0.00352f, 0.00022f),\n        make_float3(0.24871f, -0.00501f, 0.00012f), make_float3(0.25433f, -0.00721f, 0.00007f), make_float3(0.26003f, -0.01042f, 0.00004f),\n        make_float3(0.26604f, -0.01512f, 0.00002f), make_float3(0.27264f, -0.02192f, 0.00001f), make_float3(0.28039f, -0.03158f, 0.00001f),\n        make_float3(0.29024f, -0.04491f, 0.00000f), make_float3(0.30382f, -0.06232f, 0.00000f), make_float3(0.32395f, -0.08307f, 0.00000f),\n        make_float3(0.35533f, -0.10404f, 0.00000f), make_float3(0.40491f, -0.11883f, 0.00000f), make_float3(0.47816f, -0.11902f, 0.00000f),\n        make_float3(0.56774f, -0.09644f, 0.00000f), make_float3(0.66332f, -0.00000f, 0.00000f),\n\n\n        make_float3(0.05655f, -0.31167f, 0.32420f), make_float3(0.10687f, -0.03376f, 0.35090f), make_float3(0.12655f, -0.00804f, 0.31009f),\n        make_float3(0.13914f, -0.00363f, 0.24233f), make_float3(0.15029f, -0.00227f, 0.17455f), make_float3(0.16057f, -0.00177f, 0.11907f),\n        make_float3(0.17014f, -0.00156f, 0.07820f), make_float3(0.17910f, -0.00153f, 0.04999f), make_float3(0.18752f, -0.00162f, 0.03132f),\n        make_float3(0.19549f, -0.00182f, 0.01932f), make_float3(0.20304f, -0.00213f, 0.01178f), make_float3(0.21025f, -0.00261f, 0.00712f),\n        make_float3(0.21716f, -0.00326f, 0.00427f), make_float3(0.22380f, -0.00420f, 0.00255f), make_float3(0.23022f, -0.00554f, 0.00151f),\n        make_float3(0.23648f, -0.00740f, 0.00089f), make_float3(0.24264f, -0.01005f, 0.00053f), make_float3(0.24879f, -0.01381f, 0.00031f),\n        make_float3(0.25510f, -0.01911f, 0.00018f), make_float3(0.26177f, -0.02658f, 0.00011f), make_float3(0.26918f, -0.03698f, 
0.00006f),\n        make_float3(0.27795f, -0.05122f, 0.00004f), make_float3(0.28910f, -0.07004f, 0.00002f), make_float3(0.30427f, -0.09359f, 0.00001f),\n        make_float3(0.32606f, -0.12066f, 0.00001f), make_float3(0.35822f, -0.14764f, 0.00001f), make_float3(0.40512f, -0.16863f, 0.00000f),\n        make_float3(0.46849f, -0.17766f, 0.00000f), make_float3(0.54169f, -0.17178f, 0.00000f), make_float3(0.61239f, -0.15052f, 0.00000f),\n        make_float3(0.67350f, -0.11117f, 0.00000f), make_float3(0.73152f, 0.00000f, 0.00000f),\n\n\n        make_float3(0.05336f, -0.34864f, 0.38172f), make_float3(0.09920f, -0.08509f, 0.36009f), make_float3(0.12900f, -0.02477f, 0.31816f),\n        make_float3(0.14348f, -0.01195f, 0.25287f), make_float3(0.15525f, -0.00763f, 0.18668f), make_float3(0.16584f, -0.00587f, 0.13092f),\n        make_float3(0.17560f, -0.00512f, 0.08853f), make_float3(0.18472f, -0.00492f, 0.05832f), make_float3(0.19329f, -0.00508f, 0.03768f),\n        make_float3(0.20140f, -0.00555f, 0.02399f), make_float3(0.20910f, -0.00634f, 0.01510f), make_float3(0.21647f, -0.00749f, 0.00942f),\n        make_float3(0.22355f, -0.00914f, 0.00584f), make_float3(0.23039f, -0.01140f, 0.00360f), make_float3(0.23709f, -0.01450f, 0.00221f),\n        make_float3(0.24371f, -0.01877f, 0.00135f), make_float3(0.25039f, -0.02458f, 0.00083f), make_float3(0.25730f, -0.03248f, 0.00051f),\n        make_float3(0.26474f, -0.04313f, 0.00031f), make_float3(0.27313f, -0.05721f, 0.00019f), make_float3(0.28319f, -0.07550f, 0.00012f),\n        make_float3(0.29598f, -0.09825f, 0.00008f), make_float3(0.31310f, -0.12490f, 0.00005f), make_float3(0.33683f, -0.15332f, 0.00003f),\n        make_float3(0.36995f, -0.17964f, 0.00002f), make_float3(0.41497f, -0.19882f, 0.00002f), make_float3(0.47174f, -0.20686f, 0.00001f),\n        make_float3(0.53490f, -0.20242f, 0.00001f), make_float3(0.59635f, -0.18603f, 0.00001f), make_float3(0.65092f, -0.15783f, 0.00001f),\n        make_float3(0.69798f, -0.11426f, 0.00001f), 
make_float3(0.74494f, 0.00000f, 0.00001f),\n\n\n        make_float3(0.05749f, -0.31793f, 0.44455f), make_float3(0.09398f, -0.14133f, 0.37908f), make_float3(0.13152f, -0.05344f, 0.33487f),\n        make_float3(0.14884f, -0.02831f, 0.27078f), make_float3(0.16170f, -0.01861f, 0.20554f), make_float3(0.17282f, -0.01431f, 0.14892f),\n        make_float3(0.18293f, -0.01233f, 0.10431f), make_float3(0.19231f, -0.01159f, 0.07128f), make_float3(0.20110f, -0.01165f, 0.04782f),\n        make_float3(0.20942f, -0.01233f, 0.03163f), make_float3(0.21733f, -0.01364f, 0.02070f), make_float3(0.22491f, -0.01559f, 0.01344f),\n        make_float3(0.23224f, -0.01830f, 0.00867f), make_float3(0.23938f, -0.02200f, 0.00557f), make_float3(0.24643f, -0.02692f, 0.00357f),\n        make_float3(0.25351f, -0.03342f, 0.00228f), make_float3(0.26079f, -0.04192f, 0.00146f), make_float3(0.26852f, -0.05298f, 0.00094f),\n        make_float3(0.27707f, -0.06708f, 0.00060f), make_float3(0.28698f, -0.08468f, 0.00039f), make_float3(0.29907f, -0.10591f, 0.00026f),\n        make_float3(0.31446f, -0.13028f, 0.00017f), make_float3(0.33466f, -0.15629f, 0.00012f), make_float3(0.36155f, -0.18122f, 0.00008f),\n        make_float3(0.39700f, -0.20145f, 0.00006f), make_float3(0.44194f, -0.21352f, 0.00005f), make_float3(0.49484f, -0.21535f, 0.00004f),\n        make_float3(0.55114f, -0.20661f, 0.00003f), make_float3(0.60550f, -0.18774f, 0.00003f), make_float3(0.65477f, -0.15830f, 0.00002f),\n        make_float3(0.69863f, -0.11427f, 0.00002f), make_float3(0.74332f, 0.00000f, 0.00002f),\n\n\n        make_float3(0.06502f, -0.28106f, 0.49493f), make_float3(0.09745f, -0.17506f, 0.41308f), make_float3(0.13592f, -0.08778f, 0.36191f),\n        make_float3(0.15585f, -0.05167f, 0.29839f), make_float3(0.17008f, -0.03538f, 0.23353f), make_float3(0.18198f, -0.02741f, 0.17549f),\n        make_float3(0.19260f, -0.02338f, 0.12792f), make_float3(0.20235f, -0.02153f, 0.09115f), make_float3(0.21146f, -0.02104f, 0.06384f),\n        
make_float3(0.22006f, -0.02158f, 0.04414f), make_float3(0.22825f, -0.02299f, 0.03022f), make_float3(0.23612f, -0.02528f, 0.02053f),\n        make_float3(0.24374f, -0.02852f, 0.01387f), make_float3(0.25120f, -0.03287f, 0.00933f), make_float3(0.25860f, -0.03853f, 0.00626f),\n        make_float3(0.26608f, -0.04579f, 0.00420f), make_float3(0.27381f, -0.05493f, 0.00282f), make_float3(0.28203f, -0.06627f, 0.00189f),\n        make_float3(0.29109f, -0.08016f, 0.00128f), make_float3(0.30146f, -0.09668f, 0.00087f), make_float3(0.31381f, -0.11579f, 0.00060f),\n        make_float3(0.32900f, -0.13678f, 0.00042f), make_float3(0.34816f, -0.15845f, 0.00030f), make_float3(0.37257f, -0.17874f, 0.00022f),\n        make_float3(0.40358f, -0.19512f, 0.00016f), make_float3(0.44202f, -0.20502f, 0.00013f), make_float3(0.48743f, -0.20660f, 0.00010f),\n        make_float3(0.53748f, -0.19897f, 0.00008f), make_float3(0.58855f, -0.18183f, 0.00007f), make_float3(0.63751f, -0.15424f, 0.00006f),\n        make_float3(0.68300f, -0.11197f, 0.00005f), make_float3(0.72978f, 0.00001f, 0.00005f),\n\n\n        make_float3(0.07528f, -0.24711f, 0.53455f), make_float3(0.10863f, -0.18975f, 0.45764f), make_float3(0.14433f, -0.11815f, 0.39949f),\n        make_float3(0.16570f, -0.07688f, 0.33687f), make_float3(0.18121f, -0.05511f, 0.27245f), make_float3(0.19398f, -0.04327f, 0.21288f),\n        make_float3(0.20521f, -0.03673f, 0.16193f), make_float3(0.21545f, -0.03319f, 0.12068f), make_float3(0.22496f, -0.03159f, 0.08855f),\n        make_float3(0.23392f, -0.03136f, 0.06420f), make_float3(0.24244f, -0.03223f, 0.04612f), make_float3(0.25063f, -0.03408f, 0.03290f),\n        make_float3(0.25857f, -0.03690f, 0.02335f), make_float3(0.26634f, -0.04074f, 0.01650f), make_float3(0.27403f, -0.04570f, 0.01163f),\n        make_float3(0.28178f, -0.05195f, 0.00819f), make_float3(0.28972f, -0.05961f, 0.00577f), make_float3(0.29802f, -0.06886f, 0.00406f),\n        make_float3(0.30695f, -0.07982f, 0.00287f), make_float3(0.31682f, 
-0.09255f, 0.00204f), make_float3(0.32809f, -0.10692f, 0.00146f),\n        make_float3(0.34129f, -0.12259f, 0.00105f), make_float3(0.35714f, -0.13882f, 0.00077f), make_float3(0.37647f, -0.15448f, 0.00057f),\n        make_float3(0.40023f, -0.16805f, 0.00043f), make_float3(0.42931f, -0.17770f, 0.00033f), make_float3(0.46430f, -0.18158f, 0.00026f),\n        make_float3(0.50501f, -0.17811f, 0.00021f), make_float3(0.55015f, -0.16592f, 0.00018f), make_float3(0.59760f, -0.14331f, 0.00015f),\n        make_float3(0.64549f, -0.10572f, 0.00013f), make_float3(0.69674f, 0.00000f, 0.00012f),\n\n\n        make_float3(0.08968f, -0.22166f, 0.57013f), make_float3(0.12485f, -0.19606f, 0.50498f), make_float3(0.15755f, -0.13952f, 0.44492f),\n        make_float3(0.17936f, -0.09867f, 0.38452f), make_float3(0.19578f, -0.07393f, 0.32154f), make_float3(0.20935f, -0.05908f, 0.26116f),\n        make_float3(0.22123f, -0.05009f, 0.20725f), make_float3(0.23199f, -0.04467f, 0.16152f), make_float3(0.24197f, -0.04157f, 0.12414f),\n        make_float3(0.25134f, -0.04011f, 0.09438f), make_float3(0.26024f, -0.03986f, 0.07115f), make_float3(0.26879f, -0.04064f, 0.05329f),\n        make_float3(0.27706f, -0.04233f, 0.03970f), make_float3(0.28514f, -0.04488f, 0.02947f), make_float3(0.29311f, -0.04830f, 0.02181f),\n        make_float3(0.30105f, -0.05263f, 0.01611f), make_float3(0.30908f, -0.05790f, 0.01189f), make_float3(0.31731f, -0.06417f, 0.00877f),\n        make_float3(0.32591f, -0.07150f, 0.00648f), make_float3(0.33507f, -0.07986f, 0.00480f), make_float3(0.34502f, -0.08921f, 0.00356f),\n        make_float3(0.35610f, -0.09937f, 0.00266f), make_float3(0.36867f, -0.11001f, 0.00200f), make_float3(0.38323f, -0.12062f, 0.00151f),\n        make_float3(0.40031f, -0.13041f, 0.00116f), make_float3(0.42056f, -0.13839f, 0.00090f), make_float3(0.44465f, -0.14322f, 0.00070f),\n        make_float3(0.47318f, -0.14336f, 0.00056f), make_float3(0.50652f, -0.13700f, 0.00046f), make_float3(0.54470f, -0.12166f, 
0.00038f),\n        make_float3(0.58750f, -0.09227f, 0.00032f), make_float3(0.63790f, 0.00000f, 0.00028f),\n\n\n        make_float3(0.10928f, -0.20982f, 0.60377f), make_float3(0.14371f, -0.19861f, 0.54677f), make_float3(0.17437f, -0.15359f, 0.48934f),\n        make_float3(0.19613f, -0.11610f, 0.43261f), make_float3(0.21313f, -0.09068f, 0.37256f), make_float3(0.22735f, -0.07402f, 0.31310f),\n        make_float3(0.23983f, -0.06310f, 0.25792f), make_float3(0.25113f, -0.05592f, 0.20914f), make_float3(0.26159f, -0.05128f, 0.16750f),\n        make_float3(0.27141f, -0.04843f, 0.13286f), make_float3(0.28075f, -0.04692f, 0.10458f), make_float3(0.28970f, -0.04644f, 0.08182f),\n        make_float3(0.29835f, -0.04682f, 0.06371f), make_float3(0.30679f, -0.04799f, 0.04942f), make_float3(0.31508f, -0.04984f, 0.03822f),\n        make_float3(0.32329f, -0.05234f, 0.02949f), make_float3(0.33149f, -0.05550f, 0.02272f), make_float3(0.33979f, -0.05931f, 0.01749f),\n        make_float3(0.34827f, -0.06373f, 0.01346f), make_float3(0.35704f, -0.06873f, 0.01036f), make_float3(0.36626f, -0.07428f, 0.00799f),\n        make_float3(0.37608f, -0.08028f, 0.00617f), make_float3(0.38672f, -0.08655f, 0.00478f), make_float3(0.39843f, -0.09283f, 0.00371f),\n        make_float3(0.41148f, -0.09873f, 0.00290f), make_float3(0.42626f, -0.10371f, 0.00228f), make_float3(0.44316f, -0.10703f, 0.00181f),\n        make_float3(0.46265f, -0.10766f, 0.00145f), make_float3(0.48525f, -0.10415f, 0.00117f), make_float3(0.51155f, -0.09426f, 0.00096f),\n        make_float3(0.54238f, -0.07326f, 0.00080f), make_float3(0.58108f, 0.00000f, 0.00068f),\n\n\n        make_float3(0.13051f, -0.20682f, 0.62432f), make_float3(0.16330f, -0.20073f, 0.57323f), make_float3(0.19267f, -0.16509f, 0.51965f),\n        make_float3(0.21421f, -0.13200f, 0.46714f), make_float3(0.23154f, -0.10730f, 0.41130f), make_float3(0.24629f, -0.08978f, 0.35487f),\n        make_float3(0.25931f, -0.07747f, 0.30102f), make_float3(0.27115f, -0.06883f, 0.25188f), 
make_float3(0.28213f, -0.06278f, 0.20849f),\n        make_float3(0.29244f, -0.05862f, 0.17111f), make_float3(0.30225f, -0.05585f, 0.13949f), make_float3(0.31166f, -0.05417f, 0.11309f),\n        make_float3(0.32077f, -0.05341f, 0.09129f), make_float3(0.32964f, -0.05335f, 0.07343f), make_float3(0.33834f, -0.05393f, 0.05890f),\n        make_float3(0.34693f, -0.05508f, 0.04713f), make_float3(0.35546f, -0.05671f, 0.03765f), make_float3(0.36403f, -0.05880f, 0.03004f),\n        make_float3(0.37268f, -0.06133f, 0.02395f), make_float3(0.38149f, -0.06421f, 0.01909f), make_float3(0.39055f, -0.06741f, 0.01521f),\n        make_float3(0.39997f, -0.07082f, 0.01213f), make_float3(0.40987f, -0.07434f, 0.00969f), make_float3(0.42039f, -0.07779f, 0.00775f),\n        make_float3(0.43169f, -0.08092f, 0.00622f), make_float3(0.44398f, -0.08341f, 0.00500f), make_float3(0.45749f, -0.08480f, 0.00404f),\n        make_float3(0.47249f, -0.08439f, 0.00328f), make_float3(0.48934f, -0.08117f, 0.00268f), make_float3(0.50847f, -0.07344f, 0.00220f),\n        make_float3(0.53061f, -0.05739f, 0.00183f), make_float3(0.55814f, -0.00000f, 0.00155f),\n\n\n        make_float3(0.15121f, -0.20607f, 0.62947f), make_float3(0.18300f, -0.20354f, 0.58335f), make_float3(0.21155f, -0.17590f, 0.53312f),\n        make_float3(0.23282f, -0.14755f, 0.48447f), make_float3(0.25031f, -0.12455f, 0.43318f), make_float3(0.26543f, -0.10703f, 0.38096f),\n        make_float3(0.27895f, -0.09394f, 0.33029f), make_float3(0.29131f, -0.08418f, 0.28305f), make_float3(0.30281f, -0.07693f, 0.24033f),\n        make_float3(0.31365f, -0.07157f, 0.20255f), make_float3(0.32398f, -0.06766f, 0.16971f), make_float3(0.33391f, -0.06487f, 0.14151f),\n        make_float3(0.34352f, -0.06300f, 0.11754f), make_float3(0.35289f, -0.06189f, 0.09733f), make_float3(0.36206f, -0.06138f, 0.08038f),\n        make_float3(0.37112f, -0.06139f, 0.06624f), make_float3(0.38010f, -0.06186f, 0.05449f), make_float3(0.38905f, -0.06272f, 0.04477f),\n        
make_float3(0.39804f, -0.06390f, 0.03675f), make_float3(0.40711f, -0.06534f, 0.03014f), make_float3(0.41635f, -0.06699f, 0.02471f),\n        make_float3(0.42580f, -0.06874f, 0.02026f), make_float3(0.43556f, -0.07051f, 0.01662f), make_float3(0.44571f, -0.07214f, 0.01365f),\n        make_float3(0.45636f, -0.07346f, 0.01122f), make_float3(0.46763f, -0.07423f, 0.00924f), make_float3(0.47968f, -0.07408f, 0.00762f),\n        make_float3(0.49266f, -0.07254f, 0.00631f), make_float3(0.50681f, -0.06883f, 0.00524f), make_float3(0.52242f, -0.06160f, 0.00437f),\n        make_float3(0.54001f, -0.04778f, 0.00367f), make_float3(0.56112f, 0.00000f, 0.00312f),\n\n\n        make_float3(0.17197f, -0.20561f, 0.62587f), make_float3(0.20338f, -0.20599f, 0.58408f), make_float3(0.23149f, -0.18515f, 0.53684f),\n        make_float3(0.25251f, -0.16154f, 0.49150f), make_float3(0.27004f, -0.14094f, 0.44437f), make_float3(0.28542f, -0.12426f, 0.39642f),\n        make_float3(0.29931f, -0.11107f, 0.34949f), make_float3(0.31213f, -0.10073f, 0.30512f), make_float3(0.32413f, -0.09263f, 0.26430f),\n        make_float3(0.33550f, -0.08631f, 0.22751f), make_float3(0.34636f, -0.08139f, 0.19485f), make_float3(0.35682f, -0.07760f, 0.16619f),\n        make_float3(0.36695f, -0.07471f, 0.14127f), make_float3(0.37684f, -0.07258f, 0.11976f), make_float3(0.38652f, -0.07106f, 0.10128f),\n        make_float3(0.39607f, -0.07006f, 0.08549f), make_float3(0.40551f, -0.06949f, 0.07205f), make_float3(0.41490f, -0.06928f, 0.06064f),\n        make_float3(0.42427f, -0.06933f, 0.05099f), make_float3(0.43369f, -0.06961f, 0.04283f), make_float3(0.44319f, -0.07004f, 0.03597f),\n        make_float3(0.45283f, -0.07053f, 0.03019f), make_float3(0.46265f, -0.07097f, 0.02535f), make_float3(0.47273f, -0.07127f, 0.02128f),\n        make_float3(0.48314f, -0.07125f, 0.01788f), make_float3(0.49395f, -0.07070f, 0.01504f), make_float3(0.50527f, -0.06936f, 0.01267f),\n        make_float3(0.51721f, -0.06683f, 0.01069f), make_float3(0.52993f, 
-0.06247f, 0.00904f), make_float3(0.54362f, -0.05516f, 0.00766f),\n        make_float3(0.55866f, -0.04229f, 0.00653f), make_float3(0.57607f, -0.00000f, 0.00561f),\n\n\n        make_float3(0.19561f, -0.20369f, 0.62676f), make_float3(0.22742f, -0.20521f, 0.59059f), make_float3(0.25590f, -0.18887f, 0.54809f),\n        make_float3(0.27707f, -0.16911f, 0.50677f), make_float3(0.29477f, -0.15102f, 0.46369f), make_float3(0.31041f, -0.13567f, 0.41959f),\n        make_float3(0.32465f, -0.12298f, 0.37594f), make_float3(0.33788f, -0.11258f, 0.33408f), make_float3(0.35033f, -0.10408f, 0.29491f),\n        make_float3(0.36219f, -0.09713f, 0.25894f), make_float3(0.37355f, -0.09144f, 0.22638f), make_float3(0.38451f, -0.08677f, 0.19721f),\n        make_float3(0.39514f, -0.08296f, 0.17130f), make_float3(0.40551f, -0.07987f, 0.14843f), make_float3(0.41565f, -0.07736f, 0.12835f),\n        make_float3(0.42562f, -0.07533f, 0.11079f), make_float3(0.43546f, -0.07371f, 0.09548f), make_float3(0.44519f, -0.07242f, 0.08219f),\n        make_float3(0.45486f, -0.07138f, 0.07067f), make_float3(0.46449f, -0.07053f, 0.06071f), make_float3(0.47413f, -0.06981f, 0.05212f),\n        make_float3(0.48380f, -0.06914f, 0.04472f), make_float3(0.49355f, -0.06841f, 0.03835f), make_float3(0.50342f, -0.06755f, 0.03289f),\n        make_float3(0.51345f, -0.06643f, 0.02821f), make_float3(0.52370f, -0.06487f, 0.02421f), make_float3(0.53423f, -0.06265f, 0.02078f),\n        make_float3(0.54512f, -0.05946f, 0.01786f), make_float3(0.55646f, -0.05481f, 0.01537f), make_float3(0.56840f, -0.04776f, 0.01325f),\n        make_float3(0.58119f, -0.03618f, 0.01145f), make_float3(0.59545f, -0.00000f, 0.00995f),\n\n\n        make_float3(0.22163f, -0.20055f, 0.63021f), make_float3(0.25410f, -0.20231f, 0.59968f), make_float3(0.28326f, -0.18906f, 0.56225f),\n        make_float3(0.30475f, -0.17238f, 0.52492f), make_float3(0.32266f, -0.15662f, 0.48555f), make_float3(0.33852f, -0.14279f, 0.44484f),\n        make_float3(0.35302f, 
-0.13097f, 0.40410f), make_float3(0.36656f, -0.12093f, 0.36452f), make_float3(0.37937f, -0.11245f, 0.32693f),\n        make_float3(0.39160f, -0.10523f, 0.29188f), make_float3(0.40334f, -0.09909f, 0.25962f), make_float3(0.41470f, -0.09386f, 0.23022f),\n        make_float3(0.42572f, -0.08939f, 0.20363f), make_float3(0.43646f, -0.08557f, 0.17973f), make_float3(0.44696f, -0.08227f, 0.15834f),\n        make_float3(0.45725f, -0.07943f, 0.13927f), make_float3(0.46736f, -0.07697f, 0.12233f), make_float3(0.47733f, -0.07481f, 0.10732f),\n        make_float3(0.48718f, -0.07290f, 0.09404f), make_float3(0.49693f, -0.07117f, 0.08234f), make_float3(0.50661f, -0.06954f, 0.07203f),\n        make_float3(0.51626f, -0.06797f, 0.06298f), make_float3(0.52588f, -0.06635f, 0.05503f), make_float3(0.53552f, -0.06465f, 0.04807f),\n        make_float3(0.54521f, -0.06270f, 0.04198f), make_float3(0.55498f, -0.06041f, 0.03667f), make_float3(0.56487f, -0.05757f, 0.03203f),\n        make_float3(0.57494f, -0.05392f, 0.02799f), make_float3(0.58526f, -0.04906f, 0.02447f), make_float3(0.59592f, -0.04224f, 0.02142f),\n        make_float3(0.60709f, -0.03161f, 0.01879f), make_float3(0.61917f, -0.00000f, 0.01653f),\n\n\n        make_float3(0.24798f, -0.19716f, 0.63007f), make_float3(0.28069f, -0.19930f, 0.60332f), make_float3(0.31015f, -0.18883f, 0.56936f),\n        make_float3(0.33172f, -0.17504f, 0.53486f), make_float3(0.34964f, -0.16162f, 0.49830f), make_float3(0.36550f, -0.14954f, 0.46037f),\n        make_float3(0.38006f, -0.13890f, 0.42220f), make_float3(0.39370f, -0.12960f, 0.38484f), make_float3(0.40666f, -0.12148f, 0.34906f),\n        make_float3(0.41907f, -0.11438f, 0.31535f), make_float3(0.43103f, -0.10815f, 0.28399f), make_float3(0.44261f, -0.10266f, 0.25508f),\n        make_float3(0.45385f, -0.09783f, 0.22861f), make_float3(0.46481f, -0.09354f, 0.20450f), make_float3(0.47551f, -0.08974f, 0.18264f),\n        make_float3(0.48599f, -0.08633f, 0.16289f), make_float3(0.49627f, -0.08326f, 0.14510f), 
make_float3(0.50638f, -0.08047f, 0.12910f),\n        make_float3(0.51633f, -0.07791f, 0.11476f), make_float3(0.52614f, -0.07551f, 0.10192f), make_float3(0.53584f, -0.07321f, 0.09045f),\n        make_float3(0.54546f, -0.07096f, 0.08022f), make_float3(0.55500f, -0.06867f, 0.07111f), make_float3(0.56449f, -0.06628f, 0.06301f),\n        make_float3(0.57397f, -0.06367f, 0.05582f), make_float3(0.58345f, -0.06076f, 0.04944f), make_float3(0.59297f, -0.05731f, 0.04379f),\n        make_float3(0.60257f, -0.05316f, 0.03879f), make_float3(0.61231f, -0.04788f, 0.03438f), make_float3(0.62225f, -0.04079f, 0.03050f),\n        make_float3(0.63252f, -0.03022f, 0.02708f), make_float3(0.64336f, 0.00000f, 0.02411f),\n\n\n        make_float3(0.27722f, -0.19199f, 0.63565f), make_float3(0.30981f, -0.19408f, 0.61130f), make_float3(0.33943f, -0.18579f, 0.57961f),\n        make_float3(0.36102f, -0.17447f, 0.54708f), make_float3(0.37889f, -0.16322f, 0.51263f), make_float3(0.39470f, -0.15282f, 0.47688f),\n        make_float3(0.40920f, -0.14347f, 0.44082f), make_float3(0.42282f, -0.13509f, 0.40537f), make_float3(0.43577f, -0.12760f, 0.37118f),\n        make_float3(0.44819f, -0.12084f, 0.33873f), make_float3(0.46018f, -0.11477f, 0.30827f), make_float3(0.47180f, -0.10928f, 0.27992f),\n        make_float3(0.48310f, -0.10430f, 0.25370f), make_float3(0.49411f, -0.09979f, 0.22957f), make_float3(0.50485f, -0.09566f, 0.20745f),\n        make_float3(0.51535f, -0.09188f, 0.18722f), make_float3(0.52564f, -0.08837f, 0.16879f), make_float3(0.53572f, -0.08512f, 0.15202f),\n        make_float3(0.54562f, -0.08205f, 0.13680f), make_float3(0.55536f, -0.07912f, 0.12300f), make_float3(0.56495f, -0.07630f, 0.11052f),\n        make_float3(0.57441f, -0.07350f, 0.09925f), make_float3(0.58375f, -0.07067f, 0.08908f), make_float3(0.59300f, -0.06773f, 0.07992f),\n        make_float3(0.60219f, -0.06459f, 0.07168f), make_float3(0.61132f, -0.06115f, 0.06428f), make_float3(0.62043f, -0.05724f, 0.05763f),\n        
make_float3(0.62954f, -0.05265f, 0.05168f), make_float3(0.63871f, -0.04703f, 0.04636f), make_float3(0.64797f, -0.03973f, 0.04161f),\n        make_float3(0.65743f, -0.02917f, 0.03737f), make_float3(0.66725f, -0.00000f, 0.03363f),\n\n\n        make_float3(0.30941f, -0.18550f, 0.64456f), make_float3(0.34168f, -0.18712f, 0.62252f), make_float3(0.37132f, -0.18024f, 0.59322f),\n        make_float3(0.39288f, -0.17081f, 0.56272f), make_float3(0.41063f, -0.16130f, 0.53026f), make_float3(0.42627f, -0.15243f, 0.49646f),\n        make_float3(0.44059f, -0.14431f, 0.46224f), make_float3(0.45403f, -0.13689f, 0.42843f), make_float3(0.46680f, -0.13010f, 0.39561f),\n        make_float3(0.47906f, -0.12388f, 0.36425f), make_float3(0.49089f, -0.11815f, 0.33457f), make_float3(0.50236f, -0.11285f, 0.30671f),\n        make_float3(0.51350f, -0.10796f, 0.28072f), make_float3(0.52435f, -0.10341f, 0.25657f), make_float3(0.53493f, -0.09917f, 0.23422f),\n        make_float3(0.54526f, -0.09519f, 0.21359f), make_float3(0.55537f, -0.09146f, 0.19458f), make_float3(0.56525f, -0.08792f, 0.17712f),\n        make_float3(0.57494f, -0.08453f, 0.16109f), make_float3(0.58444f, -0.08126f, 0.14642f), make_float3(0.59377f, -0.07806f, 0.13299f),\n        make_float3(0.60294f, -0.07488f, 0.12073f), make_float3(0.61197f, -0.07164f, 0.10955f), make_float3(0.62087f, -0.06830f, 0.09937f),\n        make_float3(0.62966f, -0.06479f, 0.09010f), make_float3(0.63836f, -0.06097f, 0.08168f), make_float3(0.64699f, -0.05673f, 0.07404f),\n        make_float3(0.65557f, -0.05183f, 0.06711f), make_float3(0.66415f, -0.04599f, 0.06085f), make_float3(0.67274f, -0.03858f, 0.05518f),\n        make_float3(0.68143f, -0.02812f, 0.05008f), make_float3(0.69032f, -0.00000f, 0.04550f),\n\n\n        make_float3(0.34235f, -0.17826f, 0.65206f), make_float3(0.37374f, -0.17952f, 0.63152f), make_float3(0.40289f, -0.17391f, 0.60389f),\n        make_float3(0.42408f, -0.16614f, 0.57496f), make_float3(0.44148f, -0.15827f, 0.54412f), 
make_float3(0.45676f, -0.15084f, 0.51197f),\n        make_float3(0.47071f, -0.14393f, 0.47937f), make_float3(0.48377f, -0.13752f, 0.44704f), make_float3(0.49618f, -0.13155f, 0.41554f),\n        make_float3(0.50808f, -0.12598f, 0.38525f), make_float3(0.51957f, -0.12076f, 0.35644f), make_float3(0.53070f, -0.11585f, 0.32922f),\n        make_float3(0.54151f, -0.11120f, 0.30365f), make_float3(0.55204f, -0.10681f, 0.27973f), make_float3(0.56231f, -0.10265f, 0.25742f),\n        make_float3(0.57233f, -0.09867f, 0.23667f), make_float3(0.58212f, -0.09486f, 0.21741f), make_float3(0.59168f, -0.09120f, 0.19957f),\n        make_float3(0.60106f, -0.08765f, 0.18306f), make_float3(0.61023f, -0.08416f, 0.16782f), make_float3(0.61923f, -0.08072f, 0.15376f),\n        make_float3(0.62806f, -0.07727f, 0.14081f), make_float3(0.63672f, -0.07376f, 0.12890f), make_float3(0.64525f, -0.07012f, 0.11795f),\n        make_float3(0.65365f, -0.06628f, 0.10790f), make_float3(0.66194f, -0.06216f, 0.09869f), make_float3(0.67013f, -0.05760f, 0.09025f),\n        make_float3(0.67825f, -0.05241f, 0.08254f), make_float3(0.68632f, -0.04628f, 0.07549f), make_float3(0.69436f, -0.03862f, 0.06906f),\n        make_float3(0.70244f, -0.02799f, 0.06321f), make_float3(0.71061f, 0.00000f, 0.05791f),\n\n\n        make_float3(0.37818f, -0.17017f, 0.65945f), make_float3(0.40866f, -0.17100f, 0.64009f), make_float3(0.43736f, -0.16627f, 0.61399f),\n        make_float3(0.45826f, -0.15979f, 0.58661f), make_float3(0.47535f, -0.15320f, 0.55744f), make_float3(0.49027f, -0.14692f, 0.52704f),\n        make_float3(0.50383f, -0.14103f, 0.49617f), make_float3(0.51647f, -0.13549f, 0.46545f), make_float3(0.52846f, -0.13026f, 0.43541f),\n        make_float3(0.53992f, -0.12530f, 0.40641f), make_float3(0.55095f, -0.12057f, 0.37864f), make_float3(0.56162f, -0.11605f, 0.35226f),\n        make_float3(0.57197f, -0.11171f, 0.32733f), make_float3(0.58204f, -0.10753f, 0.30384f), make_float3(0.59185f, -0.10350f, 0.28179f),\n        
make_float3(0.60141f, -0.09961f, 0.26113f), make_float3(0.61074f, -0.09583f, 0.24181f), make_float3(0.61985f, -0.09214f, 0.22378f),\n        make_float3(0.62877f, -0.08853f, 0.20696f), make_float3(0.63747f, -0.08494f, 0.19131f), make_float3(0.64600f, -0.08136f, 0.17675f),\n        make_float3(0.65435f, -0.07776f, 0.16324f), make_float3(0.66254f, -0.07407f, 0.15070f), make_float3(0.67057f, -0.07025f, 0.13908f),\n        make_float3(0.67846f, -0.06622f, 0.12832f), make_float3(0.68622f, -0.06190f, 0.11837f), make_float3(0.69387f, -0.05716f, 0.10919f),\n        make_float3(0.70143f, -0.05182f, 0.10071f), make_float3(0.70890f, -0.04557f, 0.09289f), make_float3(0.71632f, -0.03786f, 0.08570f),\n        make_float3(0.72371f, -0.02730f, 0.07909f), make_float3(0.73113f, -0.00000f, 0.07304f),\n\n\n        make_float3(0.41622f, -0.16117f, 0.66557f), make_float3(0.44565f, -0.16160f, 0.64703f), make_float3(0.47380f, -0.15756f, 0.62216f),\n        make_float3(0.49434f, -0.15215f, 0.59618f), make_float3(0.51107f, -0.14662f, 0.56863f), make_float3(0.52562f, -0.14132f, 0.53998f),\n        make_float3(0.53876f, -0.13629f, 0.51088f), make_float3(0.55092f, -0.13152f, 0.48188f), make_float3(0.56240f, -0.12695f, 0.45343f),\n        make_float3(0.57332f, -0.12256f, 0.42585f), make_float3(0.58380f, -0.11831f, 0.39935f), make_float3(0.59391f, -0.11419f, 0.37404f),\n        make_float3(0.60370f, -0.11017f, 0.34998f), make_float3(0.61319f, -0.10625f, 0.32718f), make_float3(0.62242f, -0.10243f, 0.30564f),\n        make_float3(0.63141f, -0.09869f, 0.28533f), make_float3(0.64017f, -0.09501f, 0.26621f), make_float3(0.64871f, -0.09139f, 0.24823f),\n        make_float3(0.65705f, -0.08778f, 0.23136f), make_float3(0.66519f, -0.08420f, 0.21553f), make_float3(0.67315f, -0.08059f, 0.20071f),\n        make_float3(0.68092f, -0.07692f, 0.18683f), make_float3(0.68853f, -0.07317f, 0.17386f), make_float3(0.69599f, -0.06927f, 0.16174f),\n        make_float3(0.70329f, -0.06516f, 0.15044f), make_float3(0.71046f, 
-0.06075f, 0.13990f), make_float3(0.71751f, -0.05595f, 0.13009f),\n        make_float3(0.72444f, -0.05055f, 0.12095f), make_float3(0.73128f, -0.04430f, 0.11246f), make_float3(0.73804f, -0.03667f, 0.10458f),\n        make_float3(0.74474f, -0.02633f, 0.09728f), make_float3(0.75140f, 0.00000f, 0.09052f),\n\n\n        make_float3(0.45566f, -0.15078f, 0.67181f), make_float3(0.48376f, -0.15083f, 0.65404f), make_float3(0.51104f, -0.14734f, 0.63030f),\n        make_float3(0.53106f, -0.14281f, 0.60566f), make_float3(0.54730f, -0.13818f, 0.57963f), make_float3(0.56134f, -0.13375f, 0.55265f),\n        make_float3(0.57395f, -0.12949f, 0.52523f), make_float3(0.58556f, -0.12542f, 0.49788f), make_float3(0.59643f, -0.12148f, 0.47100f),\n        make_float3(0.60673f, -0.11762f, 0.44485f), make_float3(0.61657f, -0.11387f, 0.41962f), make_float3(0.62603f, -0.11017f, 0.39542f),\n        make_float3(0.63516f, -0.10653f, 0.37231f), make_float3(0.64399f, -0.10294f, 0.35030f), make_float3(0.65257f, -0.09939f, 0.32939f),\n        make_float3(0.66090f, -0.09589f, 0.30956f), make_float3(0.66900f, -0.09240f, 0.29078f), make_float3(0.67689f, -0.08893f, 0.27303f),\n        make_float3(0.68459f, -0.08546f, 0.25624f), make_float3(0.69209f, -0.08197f, 0.24041f), make_float3(0.69941f, -0.07844f, 0.22547f),\n        make_float3(0.70656f, -0.07483f, 0.21139f), make_float3(0.71353f, -0.07112f, 0.19815f), make_float3(0.72036f, -0.06724f, 0.18568f),\n        make_float3(0.72704f, -0.06317f, 0.17397f), make_float3(0.73357f, -0.05880f, 0.16298f), make_float3(0.73999f, -0.05404f, 0.15266f),\n        make_float3(0.74629f, -0.04872f, 0.14298f), make_float3(0.75247f, -0.04259f, 0.13392f), make_float3(0.75857f, -0.03514f, 0.12544f),\n        make_float3(0.76459f, -0.02515f, 0.11752f), make_float3(0.77054f, -0.00000f, 0.11012f),\n\n\n        make_float3(0.49546f, -0.13868f, 0.67921f), make_float3(0.52189f, -0.13841f, 0.66231f), make_float3(0.54795f, -0.13540f, 0.63977f),\n        make_float3(0.56715f, 
-0.13163f, 0.61642f), make_float3(0.58273f, -0.12780f, 0.59185f), make_float3(0.59613f, -0.12410f, 0.56640f),\n        make_float3(0.60809f, -0.12056f, 0.54055f), make_float3(0.61905f, -0.11715f, 0.51472f), make_float3(0.62925f, -0.11379f, 0.48927f),\n        make_float3(0.63886f, -0.11049f, 0.46444f), make_float3(0.64800f, -0.10724f, 0.44041f), make_float3(0.65676f, -0.10400f, 0.41727f),\n        make_float3(0.66517f, -0.10079f, 0.39507f), make_float3(0.67330f, -0.09757f, 0.37385f), make_float3(0.68117f, -0.09438f, 0.35358f),\n        make_float3(0.68879f, -0.09119f, 0.33427f), make_float3(0.69620f, -0.08798f, 0.31589f), make_float3(0.70340f, -0.08477f, 0.29841f),\n        make_float3(0.71041f, -0.08153f, 0.28180f), make_float3(0.71724f, -0.07825f, 0.26604f), make_float3(0.72389f, -0.07490f, 0.25108f),\n        make_float3(0.73037f, -0.07146f, 0.23691f), make_float3(0.73669f, -0.06790f, 0.22349f), make_float3(0.74288f, -0.06418f, 0.21078f),\n        make_float3(0.74892f, -0.06025f, 0.19877f), make_float3(0.75481f, -0.05603f, 0.18742f), make_float3(0.76059f, -0.05144f, 0.17669f),\n        make_float3(0.76625f, -0.04631f, 0.16657f), make_float3(0.77180f, -0.04042f, 0.15703f), make_float3(0.77725f, -0.03329f, 0.14803f),\n        make_float3(0.78262f, -0.02378f, 0.13957f), make_float3(0.78790f, -0.00000f, 0.13161f),\n\n\n        make_float3(0.53465f, -0.12522f, 0.68615f), make_float3(0.55925f, -0.12477f, 0.67008f), make_float3(0.58386f, -0.12219f, 0.64863f),\n        make_float3(0.60211f, -0.11908f, 0.62649f), make_float3(0.61691f, -0.11598f, 0.60327f), make_float3(0.62960f, -0.11298f, 0.57927f),\n        make_float3(0.64087f, -0.11008f, 0.55488f), make_float3(0.65112f, -0.10725f, 0.53052f), make_float3(0.66062f, -0.10446f, 0.50646f),\n        make_float3(0.66953f, -0.10170f, 0.48294f), make_float3(0.67795f, -0.09893f, 0.46011f), make_float3(0.68598f, -0.09617f, 0.43805f),\n        make_float3(0.69369f, -0.09339f, 0.41682f), make_float3(0.70109f, -0.09059f, 0.39644f), 
make_float3(0.70824f, -0.08778f, 0.37690f),\n        make_float3(0.71515f, -0.08494f, 0.35821f), make_float3(0.72185f, -0.08209f, 0.34034f), make_float3(0.72836f, -0.07918f, 0.32326f),\n        make_float3(0.73467f, -0.07624f, 0.30696f), make_float3(0.74082f, -0.07324f, 0.29141f), make_float3(0.74679f, -0.07016f, 0.27658f),\n        make_float3(0.75261f, -0.06698f, 0.26246f), make_float3(0.75828f, -0.06367f, 0.24901f), make_float3(0.76381f, -0.06018f, 0.23621f),\n        make_float3(0.76920f, -0.05650f, 0.22404f), make_float3(0.77446f, -0.05253f, 0.21247f), make_float3(0.77961f, -0.04820f, 0.20148f),\n        make_float3(0.78464f, -0.04336f, 0.19105f), make_float3(0.78957f, -0.03781f, 0.18115f), make_float3(0.79440f, -0.03111f, 0.17176f),\n        make_float3(0.79913f, -0.02219f, 0.16287f), make_float3(0.80378f, -0.00000f, 0.15446f),\n\n\n        make_float3(0.57256f, -0.11055f, 0.69171f), make_float3(0.59531f, -0.11002f, 0.67633f), make_float3(0.61841f, -0.10792f, 0.65578f),\n        make_float3(0.63563f, -0.10544f, 0.63470f), make_float3(0.64962f, -0.10298f, 0.61271f), make_float3(0.66156f, -0.10060f, 0.59006f),\n        make_float3(0.67214f, -0.09828f, 0.56710f), make_float3(0.68170f, -0.09602f, 0.54415f), make_float3(0.69050f, -0.09375f, 0.52148f),\n        make_float3(0.69871f, -0.09149f, 0.49928f), make_float3(0.70643f, -0.08920f, 0.47768f), make_float3(0.71376f, -0.08689f, 0.45676f),\n        make_float3(0.72074f, -0.08455f, 0.43658f), make_float3(0.72745f, -0.08218f, 0.41714f), make_float3(0.73389f, -0.07976f, 0.39844f),\n        make_float3(0.74011f, -0.07732f, 0.38048f), make_float3(0.74611f, -0.07484f, 0.36325f), make_float3(0.75193f, -0.07230f, 0.34672f),\n        make_float3(0.75756f, -0.06969f, 0.33088f), make_float3(0.76304f, -0.06702f, 0.31571f), make_float3(0.76835f, -0.06427f, 0.30118f),\n        make_float3(0.77352f, -0.06140f, 0.28727f), make_float3(0.77854f, -0.05841f, 0.27397f), make_float3(0.78343f, -0.05525f, 0.26124f),\n        
make_float3(0.78818f, -0.05188f, 0.24909f), make_float3(0.79283f, -0.04824f, 0.23747f), make_float3(0.79736f, -0.04426f, 0.22638f),\n        make_float3(0.80178f, -0.03982f, 0.21580f), make_float3(0.80609f, -0.03471f, 0.20571f), make_float3(0.81032f, -0.02854f, 0.19608f),\n        make_float3(0.81445f, -0.02034f, 0.18691f), make_float3(0.81849f, 0.00001f, 0.17818f),\n\n\n        make_float3(0.60872f, -0.09464f, 0.69662f), make_float3(0.62966f, -0.09415f, 0.68177f), make_float3(0.65120f, -0.09250f, 0.66197f),\n        make_float3(0.66737f, -0.09060f, 0.64182f), make_float3(0.68051f, -0.08873f, 0.62094f), make_float3(0.69172f, -0.08690f, 0.59955f),\n        make_float3(0.70159f, -0.08513f, 0.57791f), make_float3(0.71047f, -0.08335f, 0.55632f), make_float3(0.71860f, -0.08158f, 0.53498f),\n        make_float3(0.72612f, -0.07978f, 0.51406f), make_float3(0.73318f, -0.07795f, 0.49369f), make_float3(0.73983f, -0.07608f, 0.47393f),\n        make_float3(0.74614f, -0.07417f, 0.45480f), make_float3(0.75217f, -0.07223f, 0.43634f), make_float3(0.75795f, -0.07023f, 0.41855f),\n        make_float3(0.76350f, -0.06820f, 0.40140f), make_float3(0.76885f, -0.06611f, 0.38489f), make_float3(0.77401f, -0.06395f, 0.36901f),\n        make_float3(0.77900f, -0.06175f, 0.35374f), make_float3(0.78383f, -0.05946f, 0.33905f), make_float3(0.78851f, -0.05707f, 0.32494f),\n        make_float3(0.79306f, -0.05459f, 0.31137f), make_float3(0.79745f, -0.05197f, 0.29835f), make_float3(0.80173f, -0.04920f, 0.28584f),\n        make_float3(0.80590f, -0.04623f, 0.27384f), make_float3(0.80993f, -0.04302f, 0.26232f), make_float3(0.81388f, -0.03949f, 0.25127f),\n        make_float3(0.81771f, -0.03554f, 0.24068f), make_float3(0.82144f, -0.03097f, 0.23053f), make_float3(0.82508f, -0.02547f, 0.22081f),\n        make_float3(0.82863f, -0.01815f, 0.21150f), make_float3(0.83210f, 0.00000f, 0.20258f),\n\n\n        make_float3(0.64301f, -0.07753f, 0.70123f), make_float3(0.66223f, -0.07712f, 0.68683f), 
make_float3(0.68223f, -0.07593f, 0.66763f),\n        make_float3(0.69736f, -0.07455f, 0.64827f), make_float3(0.70966f, -0.07320f, 0.62840f), make_float3(0.72015f, -0.07187f, 0.60815f),\n        make_float3(0.72934f, -0.07056f, 0.58775f), make_float3(0.73757f, -0.06924f, 0.56743f), make_float3(0.74506f, -0.06791f, 0.54737f),\n        make_float3(0.75196f, -0.06655f, 0.52771f), make_float3(0.75838f, -0.06515f, 0.50855f), make_float3(0.76441f, -0.06371f, 0.48992f),\n        make_float3(0.77009f, -0.06222f, 0.47188f), make_float3(0.77550f, -0.06069f, 0.45443f), make_float3(0.78065f, -0.05912f, 0.43757f),\n        make_float3(0.78559f, -0.05749f, 0.42129f), make_float3(0.79032f, -0.05583f, 0.40557f), make_float3(0.79488f, -0.05409f, 0.39042f),\n        make_float3(0.79926f, -0.05229f, 0.37579f), make_float3(0.80349f, -0.05042f, 0.36169f), make_float3(0.80758f, -0.04846f, 0.34810f),\n        make_float3(0.81154f, -0.04640f, 0.33499f), make_float3(0.81537f, -0.04423f, 0.32235f), make_float3(0.81906f, -0.04191f, 0.31018f),\n        make_float3(0.82266f, -0.03941f, 0.29845f), make_float3(0.82614f, -0.03670f, 0.28716f), make_float3(0.82952f, -0.03372f, 0.27629f),\n        make_float3(0.83279f, -0.03035f, 0.26582f), make_float3(0.83599f, -0.02647f, 0.25574f), make_float3(0.83908f, -0.02178f, 0.24605f),\n        make_float3(0.84209f, -0.01551f, 0.23672f), make_float3(0.84502f, 0.00001f, 0.22776f),\n\n\n        make_float3(0.67520f, -0.05931f, 0.70649f), make_float3(0.69276f, -0.05903f, 0.69240f), make_float3(0.71128f, -0.05822f, 0.67366f),\n        make_float3(0.72537f, -0.05730f, 0.65499f), make_float3(0.73687f, -0.05639f, 0.63599f), make_float3(0.74664f, -0.05549f, 0.61679f),\n        make_float3(0.75518f, -0.05459f, 0.59754f), make_float3(0.76280f, -0.05368f, 0.57840f), make_float3(0.76970f, -0.05276f, 0.55954f),\n        make_float3(0.77600f, -0.05180f, 0.54106f), make_float3(0.78185f, -0.05080f, 0.52303f), make_float3(0.78730f, -0.04977f, 0.50551f),\n        
make_float3(0.79243f, -0.04869f, 0.48852f), make_float3(0.79726f, -0.04757f, 0.47207f), make_float3(0.80185f, -0.04641f, 0.45614f),\n        make_float3(0.80622f, -0.04521f, 0.44074f), make_float3(0.81040f, -0.04396f, 0.42583f), make_float3(0.81441f, -0.04265f, 0.41143f),\n        make_float3(0.81826f, -0.04130f, 0.39750f), make_float3(0.82195f, -0.03988f, 0.38404f), make_float3(0.82551f, -0.03837f, 0.37102f),\n        make_float3(0.82892f, -0.03679f, 0.35844f), make_float3(0.83223f, -0.03510f, 0.34628f), make_float3(0.83541f, -0.03331f, 0.33452f),\n        make_float3(0.83849f, -0.03135f, 0.32316f), make_float3(0.84146f, -0.02922f, 0.31218f), make_float3(0.84433f, -0.02686f, 0.30158f),\n        make_float3(0.84711f, -0.02421f, 0.29134f), make_float3(0.84980f, -0.02113f, 0.28144f), make_float3(0.85241f, -0.01739f, 0.27189f),\n        make_float3(0.85493f, -0.01240f, 0.26266f), make_float3(0.85737f, -0.00000f, 0.25376f),\n\n\n        make_float3(0.70511f, -0.04013f, 0.71307f), make_float3(0.72112f, -0.03997f, 0.69919f), make_float3(0.73819f, -0.03950f, 0.68079f),\n        make_float3(0.75128f, -0.03897f, 0.66265f), make_float3(0.76198f, -0.03845f, 0.64443f), make_float3(0.77108f, -0.03791f, 0.62615f),\n        make_float3(0.77901f, -0.03737f, 0.60793f), make_float3(0.78606f, -0.03682f, 0.58989f), make_float3(0.79240f, -0.03625f, 0.57212f),\n        make_float3(0.79819f, -0.03565f, 0.55474f), make_float3(0.80350f, -0.03502f, 0.53779f), make_float3(0.80844f, -0.03437f, 0.52130f),\n        make_float3(0.81305f, -0.03367f, 0.50532f), make_float3(0.81738f, -0.03295f, 0.48981f), make_float3(0.82148f, -0.03220f, 0.47479f),\n        make_float3(0.82537f, -0.03142f, 0.46024f), make_float3(0.82906f, -0.03059f, 0.44615f), make_float3(0.83259f, -0.02973f, 0.43250f),\n        make_float3(0.83596f, -0.02882f, 0.41928f), make_float3(0.83918f, -0.02787f, 0.40648f), make_float3(0.84227f, -0.02686f, 0.39407f),\n        make_float3(0.84523f, -0.02578f, 0.38205f), make_float3(0.84808f, 
-0.02463f, 0.37040f), make_float3(0.85081f, -0.02339f, 0.35913f),\n        make_float3(0.85345f, -0.02205f, 0.34819f), make_float3(0.85597f, -0.02057f, 0.33761f), make_float3(0.85842f, -0.01893f, 0.32734f),\n        make_float3(0.86076f, -0.01707f, 0.31740f), make_float3(0.86302f, -0.01492f, 0.30777f), make_float3(0.86519f, -0.01229f, 0.29845f),\n        make_float3(0.86729f, -0.00878f, 0.28941f), make_float3(0.86932f, -0.00000f, 0.28066f),\n\n\n        make_float3(0.73154f, -0.02030f, 0.72031f), make_float3(0.74609f, -0.02024f, 0.70643f), make_float3(0.76177f, -0.02006f, 0.68812f),\n        make_float3(0.77389f, -0.01984f, 0.67034f), make_float3(0.78383f, -0.01961f, 0.65270f), make_float3(0.79228f, -0.01939f, 0.63519f),\n        make_float3(0.79965f, -0.01914f, 0.61784f), make_float3(0.80618f, -0.01889f, 0.60072f), make_float3(0.81203f, -0.01864f, 0.58392f),\n        make_float3(0.81735f, -0.01837f, 0.56751f), make_float3(0.82221f, -0.01807f, 0.55151f), make_float3(0.82672f, -0.01776f, 0.53597f),\n        make_float3(0.83089f, -0.01744f, 0.52087f), make_float3(0.83482f, -0.01710f, 0.50623f), make_float3(0.83850f, -0.01673f, 0.49203f),\n        make_float3(0.84198f, -0.01635f, 0.47828f), make_float3(0.84528f, -0.01595f, 0.46494f), make_float3(0.84842f, -0.01552f, 0.45201f),\n        make_float3(0.85140f, -0.01508f, 0.43946f), make_float3(0.85424f, -0.01460f, 0.42730f), make_float3(0.85696f, -0.01408f, 0.41549f),\n        make_float3(0.85955f, -0.01354f, 0.40404f), make_float3(0.86204f, -0.01296f, 0.39292f), make_float3(0.86442f, -0.01233f, 0.38212f),\n        make_float3(0.86669f, -0.01164f, 0.37165f), make_float3(0.86887f, -0.01087f, 0.36148f), make_float3(0.87097f, -0.01002f, 0.35161f),\n        make_float3(0.87297f, -0.00905f, 0.34203f), make_float3(0.87490f, -0.00791f, 0.33272f), make_float3(0.87673f, -0.00653f, 0.32369f),\n        make_float3(0.87850f, -0.00466f, 0.31492f), make_float3(0.88020f, 0.00001f, 0.30640f),\n\n\n        make_float3(0.75486f, 
-0.00000f, 0.72806f), make_float3(0.76807f, 0.00000f, 0.71395f), make_float3(0.78246f, -0.00000f, 0.69552f),\n        make_float3(0.79366f, -0.00000f, 0.67790f), make_float3(0.80290f, 0.00001f, 0.66069f), make_float3(0.81077f, 0.00001f, 0.64378f),\n        make_float3(0.81763f, -0.00000f, 0.62716f), make_float3(0.82368f, -0.00000f, 0.61086f), make_float3(0.82912f, -0.00000f, 0.59491f),\n        make_float3(0.83404f, -0.00000f, 0.57936f), make_float3(0.83852f, -0.00000f, 0.56423f), make_float3(0.84266f, 0.00000f, 0.54953f),\n        make_float3(0.84649f, 0.00000f, 0.53526f), make_float3(0.85008f, -0.00000f, 0.52142f), make_float3(0.85343f, 0.00000f, 0.50800f),\n        make_float3(0.85660f, -0.00000f, 0.49498f), make_float3(0.85959f, 0.00000f, 0.48235f), make_float3(0.86241f, -0.00000f, 0.47011f),\n        make_float3(0.86510f, 0.00001f, 0.45821f), make_float3(0.86766f, 0.00000f, 0.44666f), make_float3(0.87010f, -0.00001f, 0.43545f),\n        make_float3(0.87242f, 0.00000f, 0.42456f), make_float3(0.87464f, -0.00000f, 0.41398f), make_float3(0.87675f, 0.00000f, 0.40369f),\n        make_float3(0.87877f, 0.00000f, 0.39370f), make_float3(0.88070f, -0.00000f, 0.38398f), make_float3(0.88255f, -0.00000f, 0.37453f),\n        make_float3(0.88431f, 0.00000f, 0.36535f), make_float3(0.88600f, 0.00000f, 0.35642f), make_float3(0.88761f, -0.00000f, 0.34773f),\n        make_float3(0.88915f, -0.00000f, 0.33929f), make_float3(0.89063f, -0.00000f, 0.33107f)\n};\n\n#endif // __KERNELCC__\n\n#endif"
  },
  {
    "path": "src/Device/includes/BSDFs/ThinFilm.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_THIN_FILM_H\n#define DEVICE_THIN_FILM_H\n\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\n// Evaluation XYZ sensitivity curves in Fourier space\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F eval_sensitivity(float opd, float shift)\n{\n    // Use Gaussian fits\n\n    float phase = 2.0f * M_PI * opd * 1.0e-6f;\n\n    float3 val = make_float3(5.4856e-13f, 4.4201e-13f, 5.2481e-13f);\n    float3 pos = make_float3(1.6810e+06f, 1.7953e+06f, 2.2084e+06f);\n    float3 var = make_float3(4.3278e+09f, 9.3046e+09f, 6.6121e+09f);\n    float3 xyz = val * hippt::sqrt(2.0f * M_PI * var) * hippt::cos(pos * phase + shift) * hippt::exp(-1.0f * var * phase * phase);\n\n    xyz.x += 9.7470e-14f * sqrt(2.0f * M_PI * 4.5282e+09f) * cos(2.2399e+06f * phase + shift) * exp(-4.5282e+09f * phase * phase);\n\n    return ColorRGB32F(xyz / 1.0685e-7f);\n}\n\n/**\n * Reference: * [1] [A Practical Extension to Microfacet Theory for the Modeling of Varying Iridescence, \n *                   Belcour, Barla, 2017, Supplemental document] https://hal.science/hal-01518344v2/file/supp-mat-small%20(1).pdf\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void fresnel_phase(float cos_theta_i,\n    float eta1,\n    float eta2, float kappa2,\n    float& phi_par, float& phi_perp) \n{\n    float sinThetaSqr = 1.0f - hippt::square(cos_theta_i);\n    float A = hippt::square(eta2) * (1.0f - hippt::square(kappa2)) - hippt::square(eta1) * sinThetaSqr;\n    float B = sqrt(hippt::square(A) + hippt::square(2 * hippt::square(eta2) * kappa2));\n    float U = sqrt((A + B) * 0.5);\n    float V = sqrt((B - A) * 0.5f);\n\n    float phi_perp_y = 2.0f * eta1 * V * cos_theta_i;\n    float phi_perp_x = hippt::square(U) + hippt::square(V) - hippt::square(eta1 * cos_theta_i);\n    phi_perp = atan2(phi_perp_y, phi_perp_x);\n\n    float phi_par_y = 2.0f * eta1 * 
hippt::square(eta2) * cos_theta_i * (2.0f * kappa2 * U - (1.0f - hippt::square(kappa2)) * V);\n    float phi_par_x = hippt::square(hippt::square(eta2) * (1.0f + hippt::square(kappa2)) * cos_theta_i) - hippt::square(eta1) * (hippt::square(U) + hippt::square(V));\n    phi_par = atan2(phi_par_y, phi_par_x);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void fresnel_conductor(float cos_theta_i,\n    float eta, float k,\n    float& Rp2, float& Rs2) \n{\n    float cos_theta_i_2 = cos_theta_i * cos_theta_i;\n    float sin_theta_i_2 = 1.0f - cos_theta_i_2;\n\n    float temp1 = eta * eta - k * k - sin_theta_i_2;\n    float a2pb2 = sqrt(temp1 * temp1 + 4.0f * k * k * eta * eta);\n    float a = sqrt(0.5f * (a2pb2 + temp1));\n\n    float term1 = a2pb2 + cos_theta_i_2;\n    float term2 = 2.0f * a * cos_theta_i;\n\n    Rs2 = (term1 - term2) / (term1 + term2);\n    Rs2 = hippt::clamp(0.0f, 1.0f, Rs2);\n\n    float term3 = a2pb2 * cos_theta_i_2 + sin_theta_i_2 * sin_theta_i_2;\n    float term4 = term2 * sin_theta_i_2;\n\n    Rp2 = Rs2 * (term3 - term4) / (term3 + term4);\n    Rp2 = hippt::clamp(0.0f, 1.0f, Rp2);\n}\n\n/**\n * Reference: https://stackoverflow.com/questions/8507885/shift-hue-of-an-rgb-color\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F RGB_hue_shift(const ColorRGB32F& color, float hue_shift_degrees)\n{\n    if (hue_shift_degrees == 0.0f)\n        return color;\n\n    float cosA = cos(hue_shift_degrees / 180.0f * M_PI);\n    float sinA = sin(hue_shift_degrees / 180.0f * M_PI);\n\n    float3x3 matrix;\n    constexpr float sqrt_1_3 = 0.57735026918962576451f; // sqrtf(1.0f / 3.0f)\n    constexpr float one_over_3 = 1.0f / 3.0f;\n\n    matrix.m[0][0] = cosA + (1.0f - cosA) / 3.0f;\n    matrix.m[0][1] = one_over_3 * (1.0f - cosA) - sqrt_1_3 * sinA;\n    matrix.m[0][2] = one_over_3 * (1.0f - cosA) + sqrt_1_3 * sinA;\n    matrix.m[1][0] = one_over_3 * (1.0f - cosA) + sqrt_1_3 * sinA;\n    matrix.m[1][1] = cosA + one_over_3 * (1.0f - cosA);\n    matrix.m[1][2] = one_over_3 * (1.0f 
- cosA) - sqrt_1_3 * sinA;\n    matrix.m[2][0] = one_over_3 * (1.0f - cosA) - sqrt_1_3 * sinA;\n    matrix.m[2][1] = one_over_3 * (1.0f - cosA) + sqrt_1_3 * sinA;\n    matrix.m[2][2] = cosA + one_over_3 * (1.0f - cosA);\n\n    ColorRGB32F hue_shifted;\n    hue_shifted.r = color.r * matrix.m[0][0] + color.g * matrix.m[0][1] + color.b * matrix.m[0][2];\n    hue_shifted.g = color.r * matrix.m[1][0] + color.g * matrix.m[1][1] + color.b * matrix.m[1][2];\n    hue_shifted.b = color.r * matrix.m[2][0] + color.g * matrix.m[2][1] + color.b * matrix.m[2][2];\n\n    hue_shifted.clamp(0.0f, 1.0f);\n    return hue_shifted;\n}\n\n/**\n * References:\n *\n * [1] [A Practical Extension to Microfacet Theory for the Modeling of Varying Iridescence, Belcour, Barla, 2017] https://belcour.github.io/blog/research/publication/2017/05/01/brdf-thin-film.html\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F thin_film_fresnel(const DeviceUnpackedEffectiveMaterial& material,\n    float ambient_IOR, float HoL)\n{\n    if (material.thin_film == 0.0f)\n        // Quick exit\n        return ColorRGB32F(0.0f);\n\n    float eta1 = ambient_IOR;\n    float eta2 = material.thin_film_ior;\n    float eta3 = material.thin_film_do_ior_override ? material.thin_film_base_ior_override : material.ior;\n    // If override is not used, just default to 0.0f because the principled BSDF doesn't have \n    // complex IORs support anyways\n    float kappa3 = material.thin_film_do_ior_override ? material.thin_film_kappa_3 : 0.0f;\n\n    /* Compute the Spectral versions of the Fresnel reflectance and\n     * transmitance for each interface. 
*/\n    float R12p = 0.0f;\n    float R12s = 0.0f;\n    float T121p = 0.0f;\n    float T121s = 0.0f;\n    float R23p = 0.0f;\n    float R23s = 0.0f;\n    float cos_theta_2 = 0.0f;\n\n    float cos_theta_transmission_2 = 1.0f - (1.0f - hippt::square(HoL)) * hippt::square(eta1 / eta2);\n    if (cos_theta_transmission_2 <= 0.0f)\n    {\n        // Total internal reflection\n        R12s = 1.0f;\n        R12p = 1.0f;\n\n        // 0 transmission for total internal reflection\n        T121p = 0.0f;\n        T121s = 0.0f;\n    }\n    else\n    {\n        cos_theta_2 = sqrt(cos_theta_transmission_2);\n        fresnel_conductor(HoL, eta2 / eta1, 0.0f, R12p, R12s);\n\n        // Reflected part by the base\n        fresnel_conductor(cos_theta_2, eta3 / eta2, kappa3, R23p, R23s);\n\n        // Compute the transmission coefficients\n        T121p = 1.0f - R12p;\n        T121s = 1.0f - R12s;\n    }\n\n    /* Optical Path Difference */\n    // float D = 2.0f * eta2 * film_thickness / 1000.0f * cos_theta_2;\n    float D = material.thin_film_thickness / 1000.0f * cos_theta_2;\n\n    /* Variables */\n    float phi21p;\n    float phi21s;\n    float phi23p;\n    float phi23s;\n\n    /* Evaluate the phase shift */\n    fresnel_phase(HoL, eta1, eta2, 0.0f, phi21p, phi21s);\n    fresnel_phase(cos_theta_2, eta2, eta3, kappa3, phi23p, phi23s);\n    phi21p = M_PI - phi21p;\n    phi21s = M_PI - phi21s;\n\n    float r123p = sqrt(R12p * R23p);\n    float r123s = sqrt(R12s * R23s);\n\n    /* Iridescence term using spectral antialiasing for Parallel polarization */\n    // Reflectance term for m=0 (DC term amplitude)\n    float Rs = (hippt::square(T121p) * R23p) / (1.0f - R12p * R23p);\n    float C0 = R12p + Rs;\n    \n    ColorRGB32F I = ColorRGB32F(C0);\n    ColorRGB32F Sm;\n\n    // Reflectance term for m>0 (pairs of diracs)\n    float Cm = Rs - T121p;\n    for (int m = 1; m <= 2; ++m)\n    {\n        Cm *= r123p;\n        Sm = 2.0f * eval_sensitivity(m * D, m * (phi23p + phi21p));\n        
I += Cm * Sm;\n    }\n\n    /* Iridescence term using spectral antialiasing for Perpendicular polarization */\n    // Reflectance term for m=0 (DC term amplitude)\n    Rs = (hippt::square(T121s) * R23s) / (1.0f - R12s * R23s);\n    C0 = R12s + Rs;\n    I += ColorRGB32F(C0);\n\n    // Reflectance term for m>0 (pairs of diracs)\n    Cm = Rs - T121s;\n    for (int m = 1; m <= 2; ++m)\n    {\n        Cm *= r123s;\n        Sm = 2.0f * eval_sensitivity(m * D, m * (phi23s + phi21s));\n        I += Cm * Sm;\n    }\n\n    I *= 0.5f;\n\n    // CIE RGB and CIE XYZ 1931 conversion:\n    // source: https://en.wikipedia.org/wiki/CIE_1931_color_space\n    float r = 2.3646381f * I[0] - 0.8965361f * I[1] - 0.4680737f * I[2];\n    float g = -0.5151664f * I[0] + 1.4264000f * I[1] + 0.0887608f * I[2];\n    float b = 0.0052037f * I[0] - 0.0144081f * I[1] + 1.0092106f * I[2];\n\n    I = ColorRGB32F(r, g, b);\n    I.clamp(0.0f, 1.0f);\n\n    return RGB_hue_shift(I, material.thin_film_hue_shift_degrees * 360.0f);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Dispatcher.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_DISPATCHER_H\n#define DEVICE_DISPATCHER_H\n\n#include \"Device/includes/BSDFs/Lambertian.h\"\n#include \"Device/includes/BSDFs/OrenNayar.h\"\n#include \"Device/includes/BSDFs/Principled.h\"\n#include \"Device/includes/RayPayload.h\"\n\n/**\n * The 'random_number_generator' passed here is used only in case \n * monte-carlo integration of the directional albedo is enabled\n * \n * If 'update_ray_volume_state' is passed as true, the givenargument is passed as nullptr, the volume state of the ray won't\n * be updated by this sample call (i.e. the ray won't track if this sample call made it exit/enter a new material)\n */\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F bsdf_dispatcher_eval(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float& pdf, Xorshift32Generator& random_number_generator)\n{\n#if BSDFOverride == BSDF_NONE || BSDFOverride == BSDF_PRINCIPLED\n\t/*switch (brdf_type)\n\t{\n\t...\n\t...\n\tdefault:\n\t\tbreak;\n\t}*/\n\treturn principled_bsdf_eval(render_data, bsdf_context, pdf);\n#elif BSDFOverride == BSDF_LAMBERTIAN\n\treturn lambertian_brdf_eval(bsdf_context.material, hippt::dot(bsdf_context.to_light_direction, bsdf_context.shading_normal), pdf);\n#elif BSDFOverride == BSDF_OREN_NAYAR\n\treturn oren_nayar_brdf_eval(bsdf_context.material, bsdf_context.view_direction, bsdf_context.shading_normal, bsdf_context.to_light_direction, pdf);\n#endif\n}\n\nHIPRT_DEVICE HIPRT_INLINE float bsdf_dispatcher_pdf(const HIPRTRenderData& render_data, BSDFContext& bsdf_context)\n{\n#if BSDFOverride == BSDF_NONE || BSDFOverride == BSDF_PRINCIPLED\n\t/*switch (brdf_type)\n\t{\n\t...\n\t...\n\tdefault:\n\t\tbreak;\n\t}*/\n\treturn principled_bsdf_pdf(render_data, bsdf_context);\n#elif BSDFOverride == BSDF_LAMBERTIAN\n\treturn lambertian_brdf_pdf(bsdf_context.material, 
hippt::dot(bsdf_context.to_light_direction, bsdf_context.shading_normal));\n#elif BSDFOverride == BSDF_OREN_NAYAR\n\treturn oren_nayar_brdf_pdf(bsdf_context.material, bsdf_context.view_direction, bsdf_context.shading_normal, bsdf_context.to_light_direction);\n#endif\n}\n\n/**\n * If the 'ray_volume_state' argument is passed as nullptr, the volume state of the ray won't\n * be updated by this sample call (i.e. the ray won't track if this sample call made it exit/enter a new material)\n * \n * If sampleDirectionOnly is 'true', this function samples only the BSDF without \n * evaluating the contribution or the PDF of the BSDF. This function will then always return\n * ColorRGB32F(0.0f) and the 'pdf' out parameter will always be set to 0.0f\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F bsdf_dispatcher_sample(const HIPRTRenderData& render_data, BSDFContext& bsdf_context, float3& sampled_direction, float& pdf, Xorshift32Generator& random_number_generator)\n{\n#if BSDFOverride == BSDF_NONE || BSDFOverride == BSDF_PRINCIPLED\n\t/*switch (brdf_type)\n\t{\n\t...\n\t...\n\tdefault:\n\t\tbreak;\n\t}*/\n    return principled_bsdf_sample<sampleDirectionOnly>(render_data, bsdf_context, sampled_direction, pdf, random_number_generator);\n#elif BSDFOverride == BSDF_LAMBERTIAN\n\treturn lambertian_brdf_sample<sampleDirectionOnly>(bsdf_context.material, bsdf_context.shading_normal, sampled_direction, pdf, random_number_generator, bsdf_context.incident_light_info);\n#elif BSDFOverride == BSDF_OREN_NAYAR\n\treturn oren_nayar_brdf_sample<sampleDirectionOnly>(material, view_direction, surface_normal, sampled_direction, pdf, random_number_generator, out_sampled_light_info);\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Dispersion.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copXYZ[1]: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_DISPERSION_H\n#define DEVICE_DISPERSION_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#define WAVELENGTH_TO_RGB_FIT 0\n#define WAVELENGTH_TO_RGB_TABLES 1\n#define WavelengthToRGBMethod WAVELENGTH_TO_RGB_FIT\n\n#define MIN_SAMPLE_WAVELENGTH 360\n#define MAX_SAMPLE_WAVELENGTH 830\n\n// We only need all the code that follows if we're using the lookup tables\n#if WavelengthToRGBMethod == WAVELENGTH_TO_RGB_TABLES\n\n#define CIE_1931_samples 471\n#define CIE_1931_MIN 360\n\nconst float CIE_X_entries[CIE_1931_samples] = {\n    0.0001299000f,   0.0001458470f,   0.0001638021f,   0.0001840037f,   0.0002066902f,   0.0002321000f,   0.0002607280f,\n    0.0002930750f,   0.0003293880f,   0.0003699140f,   0.0004149000f,   0.0004641587f,   0.0005189860f,   0.0005818540f,\n    0.0006552347f,   0.0007416000f,   0.0008450296f,   0.0009645268f,   0.001094949f,    0.001231154f,    0.001368000f,\n    0.001502050f,    0.001642328f,    0.001802382f,    0.001995757f,    0.002236000f,    0.002535385f,    0.002892603f,\n    0.003300829f,    0.003753236f,    0.004243000f,    0.004762389f,    0.005330048f,    0.005978712f,    0.006741117f,\n    0.007650000f,    0.008751373f,    0.01002888f,     0.01142170f,     0.01286901f,     0.01431000f,     0.01570443f,\n    0.01714744f,     0.01878122f,     0.02074801f,     0.02319000f,     0.02620736f,     0.02978248f,     0.03388092f,\n    0.03846824f,     0.04351000f,     0.04899560f,     0.05502260f,     0.06171880f,     0.06921200f,     0.07763000f,\n    0.08695811f,     0.09717672f,     0.1084063f,      0.1207672f,      0.1343800f,      0.1493582f,      0.1653957f,\n    0.1819831f,      0.1986110f,      0.2147700f,      0.2301868f,      0.2448797f,      0.2587773f,      0.2718079f,\n    0.2839000f,      0.2949438f,      0.3048965f,      
0.3137873f,      0.3216454f,      0.3285000f,      0.3343513f,\n    0.3392101f,      0.3431213f,      0.3461296f,      0.3482800f,      0.3495999f,      0.3501474f,      0.3500130f,\n    0.3492870f,      0.3480600f,      0.3463733f,      0.3442624f,      0.3418088f,      0.3390941f,      0.3362000f,\n    0.3331977f,      0.3300411f,      0.3266357f,      0.3228868f,      0.3187000f,      0.3140251f,      0.3088840f,\n    0.3032904f,      0.2972579f,      0.2908000f,      0.2839701f,      0.2767214f,      0.2689178f,      0.2604227f,\n    0.2511000f,      0.2408475f,      0.2298512f,      0.2184072f,      0.2068115f,      0.1953600f,      0.1842136f,\n    0.1733273f,      0.1626881f,      0.1522833f,      0.1421000f,      0.1321786f,      0.1225696f,      0.1132752f,\n    0.1042979f,      0.09564000f,     0.08729955f,     0.07930804f,     0.07171776f,     0.06458099f,     0.05795001f,\n    0.05186211f,     0.04628152f,     0.04115088f,     0.03641283f,     0.03201000f,     0.02791720f,     0.02414440f,\n    0.02068700f,     0.01754040f,     0.01470000f,     0.01216179f,     0.009919960f,    0.007967240f,    0.006296346f,\n    0.004900000f,    0.003777173f,    0.002945320f,    0.002424880f,    0.002236293f,    0.002400000f,    0.002925520f,\n    0.003836560f,    0.005174840f,    0.006982080f,    0.009300000f,    0.01214949f,     0.01553588f,     0.01947752f,\n    0.02399277f,     0.02910000f,     0.03481485f,     0.04112016f,     0.04798504f,     0.05537861f,     0.06327000f,\n    0.07163501f,     0.08046224f,     0.08973996f,     0.09945645f,     0.1096000f,      0.1201674f,      0.1311145f,\n    0.1423679f,      0.1538542f,      0.1655000f,      0.1772571f,      0.1891400f,      0.2011694f,      0.2133658f,\n    0.2257499f,      0.2383209f,      0.2510668f,      0.2639922f,      0.2771017f,      0.2904000f,      0.3038912f,\n    0.3175726f,      0.3314384f,      0.3454828f,      0.3597000f,      0.3740839f,      0.3886396f,      0.4033784f,\n    0.4183115f,      
0.4334499f,      0.4487953f,      0.4643360f,      0.4800640f,      0.4959713f,      0.5120501f,\n    0.5282959f,      0.5446916f,      0.5612094f,      0.5778215f,      0.5945000f,      0.6112209f,      0.6279758f,\n    0.6447602f,      0.6615697f,      0.6784000f,      0.6952392f,      0.7120586f,      0.7288284f,      0.7455188f,\n    0.7621000f,      0.7785432f,      0.7948256f,      0.8109264f,      0.8268248f,      0.8425000f,      0.8579325f,\n    0.8730816f,      0.8878944f,      0.9023181f,      0.9163000f,      0.9297995f,      0.9427984f,      0.9552776f,\n    0.9672179f,      0.9786000f,      0.9893856f,      0.9995488f,      1.0090892f,      1.0180064f,      1.0263000f,\n    1.0339827f,      1.0409860f,      1.0471880f,      1.0524667f,      1.0567000f,      1.0597944f,      1.0617992f,\n    1.0628068f,      1.0629096f,      1.0622000f,      1.0607352f,      1.0584436f,      1.0552244f,      1.0509768f,\n    1.0456000f,      1.0390369f,      1.0313608f,      1.0226662f,      1.0130477f,      1.0026000f,      0.9913675f,\n    0.9793314f,      0.9664916f,      0.9528479f,      0.9384000f,      0.9231940f,      0.9072440f,      0.8905020f,\n    0.8729200f,      0.8544499f,      0.8350840f,      0.8149460f,      0.7941860f,      0.7729540f,      0.7514000f,\n    0.7295836f,      0.7075888f,      0.6856022f,      0.6638104f,      0.6424000f,      0.6215149f,      0.6011138f,\n    0.5811052f,      0.5613977f,      0.5419000f,      0.5225995f,      0.5035464f,      0.4847436f,      0.4661939f,\n    0.4479000f,      0.4298613f,      0.4120980f,      0.3946440f,      0.3775333f,      0.3608000f,      0.3444563f,\n    0.3285168f,      0.3130192f,      0.2980011f,      0.2835000f,      0.2695448f,      0.2561184f,      0.2431896f,\n    0.2307272f,      0.2187000f,      0.2070971f,      0.1959232f,      0.1851708f,      0.1748323f,      0.1649000f,\n    0.1553667f,      0.1462300f,      0.1374900f,      0.1291467f,      0.1212000f,      0.1136397f,      
0.1064650f,\n    0.09969044f,     0.09333061f,     0.08740000f,     0.08190096f,     0.07680428f,     0.07207712f,     0.06768664f,\n    0.06360000f,     0.05980685f,     0.05628216f,     0.05297104f,     0.04981861f,     0.04677000f,     0.04378405f,\n    0.04087536f,     0.03807264f,     0.03540461f,     0.03290000f,     0.03056419f,     0.02838056f,     0.02634484f,\n    0.02445275f,     0.02270000f,     0.02108429f,     0.01959988f,     0.01823732f,     0.01698717f,     0.01584000f,\n    0.01479064f,     0.01383132f,     0.01294868f,     0.01212920f,     0.01135916f,     0.01062935f,     0.009938846f,\n    0.009288422f,    0.008678854f,    0.008110916f,    0.007582388f,    0.007088746f,    0.006627313f,    0.006195408f,\n    0.005790346f,    0.005409826f,    0.005052583f,    0.004717512f,    0.004403507f,    0.004109457f,    0.003833913f,\n    0.003575748f,    0.003334342f,    0.003109075f,    0.002899327f,    0.002704348f,    0.002523020f,    0.002354168f,\n    0.002196616f,    0.002049190f,    0.001910960f,    0.001781438f,    0.001660110f,    0.001546459f,    0.001439971f,\n    0.001340042f,    0.001246275f,    0.001158471f,    0.001076430f,    0.0009999493f,   0.0009287358f,   0.0008624332f,\n    0.0008007503f,   0.0007433960f,   0.0006900786f,   0.0006405156f,   0.0005945021f,   0.0005518646f,   0.0005124290f,\n    0.0004760213f,   0.0004424536f,   0.0004115117f,   0.0003829814f,   0.0003566491f,   0.0003323011f,   0.0003097586f,\n    0.0002888871f,   0.0002695394f,   0.0002515682f,   0.0002348261f,   0.0002191710f,   0.0002045258f,   0.0001908405f,\n    0.0001780654f,   0.0001661505f,   0.0001550236f,   0.0001446219f,   0.0001349098f,   0.0001258520f,   0.0001174130f,\n    0.0001095515f,   0.0001022245f,   0.00009539445f,  0.00008902390f,  0.00008307527f,  0.00007751269f,  0.00007231304f,\n    0.00006745778f,  0.00006292844f,  0.00005870652f,  0.00005477028f,  0.00005109918f,  0.00004767654f,  0.00004448567f,\n    0.00004150994f,  0.00003873324f,  
0.00003614203f,  0.00003372352f,  0.00003146487f,  0.00002935326f,  0.00002737573f,\n    0.00002552433f,  0.00002379376f,  0.00002217870f,  0.00002067383f,  0.00001927226f,  0.00001796640f,  0.00001674991f,\n    0.00001561648f,  0.00001455977f,  0.00001357387f,  0.00001265436f,  0.00001179723f,  0.00001099844f,  0.00001025398f,\n    0.000009559646f, 0.000008912044f, 0.000008308358f, 0.000007745769f, 0.000007221456f, 0.000006732475f, 0.000006276423f,\n    0.000005851304f, 0.000005455118f, 0.000005085868f, 0.000004741466f, 0.000004420236f, 0.000004120783f, 0.000003841716f,\n    0.000003581652f, 0.000003339127f, 0.000003112949f, 0.000002902121f, 0.000002705645f, 0.000002522525f, 0.000002351726f,\n    0.000002192415f, 0.000002043902f, 0.000001905497f, 0.000001776509f, 0.000001656215f, 0.000001544022f, 0.000001439440f,\n    0.000001341977f, 0.000001251141f};\nconst float CIE_Y_entries[CIE_1931_samples] = {\n    0.000003917000f,  0.000004393581f,  0.000004929604f,  0.000005532136f,  0.000006208245f,  0.000006965000f,\n    0.000007813219f,  0.000008767336f,  0.000009839844f,  0.00001104323f,   0.00001239000f,   0.00001388641f,\n    0.00001555728f,   0.00001744296f,   0.00001958375f,   0.00002202000f,   0.00002483965f,   0.00002804126f,\n    0.00003153104f,   0.00003521521f,   0.00003900000f,   0.00004282640f,   0.00004691460f,   0.00005158960f,\n    0.00005717640f,   0.00006400000f,   0.00007234421f,   0.00008221224f,   0.00009350816f,   0.0001061361f,\n    0.0001200000f,    0.0001349840f,    0.0001514920f,    0.0001702080f,    0.0001918160f,    0.0002170000f,\n    0.0002469067f,    0.0002812400f,    0.0003185200f,    0.0003572667f,    0.0003960000f,    0.0004337147f,\n    0.0004730240f,    0.0005178760f,    0.0005722187f,    0.0006400000f,    0.0007245600f,    0.0008255000f,\n    0.0009411600f,    0.001069880f,     0.001210000f,     0.001362091f,     0.001530752f,     0.001720368f,\n    0.001935323f,     0.002180000f,     0.002454800f,     0.002764000f,     0.003117800f, 
    0.003526400f,\n    0.004000000f,     0.004546240f,     0.005159320f,     0.005829280f,     0.006546160f,     0.007300000f,\n    0.008086507f,     0.008908720f,     0.009767680f,     0.01066443f,      0.01160000f,      0.01257317f,\n    0.01358272f,      0.01462968f,      0.01571509f,      0.01684000f,      0.01800736f,      0.01921448f,\n    0.02045392f,      0.02171824f,      0.02300000f,      0.02429461f,      0.02561024f,      0.02695857f,\n    0.02835125f,      0.02980000f,      0.03131083f,      0.03288368f,      0.03452112f,      0.03622571f,\n    0.03800000f,      0.03984667f,      0.04176800f,      0.04376600f,      0.04584267f,      0.04800000f,\n    0.05024368f,      0.05257304f,      0.05498056f,      0.05745872f,      0.06000000f,      0.06260197f,\n    0.06527752f,      0.06804208f,      0.07091109f,      0.07390000f,      0.07701600f,      0.08026640f,\n    0.08366680f,      0.08723280f,      0.09098000f,      0.09491755f,      0.09904584f,      0.1033674f,\n    0.1078846f,       0.1126000f,       0.1175320f,       0.1226744f,       0.1279928f,       0.1334528f,\n    0.1390200f,       0.1446764f,       0.1504693f,       0.1564619f,       0.1627177f,       0.1693000f,\n    0.1762431f,       0.1835581f,       0.1912735f,       0.1994180f,       0.2080200f,       0.2171199f,\n    0.2267345f,       0.2368571f,       0.2474812f,       0.2586000f,       0.2701849f,       0.2822939f,\n    0.2950505f,       0.3085780f,       0.3230000f,       0.3384021f,       0.3546858f,       0.3716986f,\n    0.3892875f,       0.4073000f,       0.4256299f,       0.4443096f,       0.4633944f,       0.4829395f,\n    0.5030000f,       0.5235693f,       0.5445120f,       0.5656900f,       0.5869653f,       0.6082000f,\n    0.6293456f,       0.6503068f,       0.6708752f,       0.6908424f,       0.7100000f,       0.7281852f,\n    0.7454636f,       0.7619694f,       0.7778368f,       0.7932000f,       0.8081104f,       0.8224962f,\n    0.8363068f,       0.8494916f,       
0.8620000f,       0.8738108f,       0.8849624f,       0.8954936f,\n    0.9054432f,       0.9148501f,       0.9237348f,       0.9320924f,       0.9399226f,       0.9472252f,\n    0.9540000f,       0.9602561f,       0.9660074f,       0.9712606f,       0.9760225f,       0.9803000f,\n    0.9840924f,       0.9874812f,       0.9903128f,       0.9928116f,       0.9949501f,       0.9967108f,\n    0.9980983f,       0.9991120f,       0.9997482f,       1.0000000f,       0.9998567f,       0.9993046f,\n    0.9983255f,       0.9968987f,       0.9950000f,       0.9926005f,       0.9897426f,       0.9864444f,\n    0.9827241f,       0.9786000f,       0.9740837f,       0.9691712f,       0.9638568f,       0.9581349f,\n    0.9520000f,       0.9454504f,       0.9384992f,       0.9311628f,       0.9234576f,       0.9154000f,\n    0.9070064f,       0.8982772f,       0.8892048f,       0.8797816f,       0.8700000f,       0.8598613f,\n    0.8493920f,       0.8386220f,       0.8275813f,       0.8163000f,       0.8047947f,       0.7930820f,\n    0.7811920f,       0.7691547f,       0.7570000f,       0.7447541f,       0.7324224f,       0.7200036f,\n    0.7074965f,       0.6949000f,       0.6822192f,       0.6694716f,       0.6566744f,       0.6438448f,\n    0.6310000f,       0.6181555f,       0.6053144f,       0.5924756f,       0.5796379f,       0.5668000f,\n    0.5539611f,       0.5411372f,       0.5283528f,       0.5156323f,       0.5030000f,       0.4904688f,\n    0.4780304f,       0.4656776f,       0.4534032f,       0.4412000f,       0.4290800f,       0.4170360f,\n    0.4050320f,       0.3930320f,       0.3810000f,       0.3689184f,       0.3568272f,       0.3447768f,\n    0.3328176f,       0.3210000f,       0.3093381f,       0.2978504f,       0.2865936f,       0.2756245f,\n    0.2650000f,       0.2547632f,       0.2448896f,       0.2353344f,       0.2260528f,       0.2170000f,\n    0.2081616f,       0.1995488f,       0.1911552f,       0.1829744f,       0.1750000f,       0.1672235f,\n    
0.1596464f,       0.1522776f,       0.1451259f,       0.1382000f,       0.1315003f,       0.1250248f,\n    0.1187792f,       0.1127691f,       0.1070000f,       0.1014762f,       0.09618864f,      0.09112296f,\n    0.08626485f,      0.08160000f,      0.07712064f,      0.07282552f,      0.06871008f,      0.06476976f,\n    0.06100000f,      0.05739621f,      0.05395504f,      0.05067376f,      0.04754965f,      0.04458000f,\n    0.04175872f,      0.03908496f,      0.03656384f,      0.03420048f,      0.03200000f,      0.02996261f,\n    0.02807664f,      0.02632936f,      0.02470805f,      0.02320000f,      0.02180077f,      0.02050112f,\n    0.01928108f,      0.01812069f,      0.01700000f,      0.01590379f,      0.01483718f,      0.01381068f,\n    0.01283478f,      0.01192000f,      0.01106831f,      0.01027339f,      0.009533311f,     0.008846157f,\n    0.008210000f,     0.007623781f,     0.007085424f,     0.006591476f,     0.006138485f,     0.005723000f,\n    0.005343059f,     0.004995796f,     0.004676404f,     0.004380075f,     0.004102000f,     0.003838453f,\n    0.003589099f,     0.003354219f,     0.003134093f,     0.002929000f,     0.002738139f,     0.002559876f,\n    0.002393244f,     0.002237275f,     0.002091000f,     0.001953587f,     0.001824580f,     0.001703580f,\n    0.001590187f,     0.001484000f,     0.001384496f,     0.001291268f,     0.001204092f,     0.001122744f,\n    0.001047000f,     0.0009765896f,    0.0009111088f,    0.0008501332f,    0.0007932384f,    0.0007400000f,\n    0.0006900827f,    0.0006433100f,    0.0005994960f,    0.0005584547f,    0.0005200000f,    0.0004839136f,\n    0.0004500528f,    0.0004183452f,    0.0003887184f,    0.0003611000f,    0.0003353835f,    0.0003114404f,\n    0.0002891656f,    0.0002684539f,    0.0002492000f,    0.0002313019f,    0.0002146856f,    0.0001992884f,\n    0.0001850475f,    0.0001719000f,    0.0001597781f,    0.0001486044f,    0.0001383016f,    0.0001287925f,\n    0.0001200000f,    0.0001118595f,    
0.0001043224f,    0.00009733560f,   0.00009084587f,   0.00008480000f,\n    0.00007914667f,   0.00007385800f,   0.00006891600f,   0.00006430267f,   0.00006000000f,   0.00005598187f,\n    0.00005222560f,   0.00004871840f,   0.00004544747f,   0.00004240000f,   0.00003956104f,   0.00003691512f,\n    0.00003444868f,   0.00003214816f,   0.00003000000f,   0.00002799125f,   0.00002611356f,   0.00002436024f,\n    0.00002272461f,   0.00002120000f,   0.00001977855f,   0.00001845285f,   0.00001721687f,   0.00001606459f,\n    0.00001499000f,   0.00001398728f,   0.00001305155f,   0.00001217818f,   0.00001136254f,   0.00001060000f,\n    0.000009885877f,  0.000009217304f,  0.000008592362f,  0.000008009133f,  0.000007465700f,  0.000006959567f,\n    0.000006487995f,  0.000006048699f,  0.000005639396f,  0.000005257800f,  0.000004901771f,  0.000004569720f,\n    0.000004260194f,  0.000003971739f,  0.000003702900f,  0.000003452163f,  0.000003218302f,  0.000003000300f,\n    0.000002797139f,  0.000002607800f,  0.000002431220f,  0.000002266531f,  0.000002113013f,  0.000001969943f,\n    0.000001836600f,  0.000001712230f,  0.000001596228f,  0.000001488090f,  0.000001387314f,  0.000001293400f,\n    0.000001205820f,  0.000001124143f,  0.000001048009f,  0.0000009770578f, 0.0000009109300f, 0.0000008492513f,\n    0.0000007917212f, 0.0000007380904f, 0.0000006881098f, 0.0000006415300f, 0.0000005980895f, 0.0000005575746f,\n    0.0000005198080f, 0.0000004846123f, 0.0000004518100f};\n\nconst float CIE_Z_entries[CIE_1931_samples] = {\n    0.0006061000f, 0.0006808792f, 0.0007651456f, 0.0008600124f,\n    0.0009665928f, 0.001086000f, 0.001220586f, 0.001372729f,\n    0.001543579f, 0.001734286f, 0.001946000f, 0.002177777f,\n    0.002435809f, 0.002731953f, 0.003078064f, 0.003486000f,\n    0.003975227f, 0.004540880f, 0.005158320f, 0.005802907f,\n    0.006450001f, 0.007083216f, 0.007745488f, 0.008501152f,\n    0.009414544f, 0.01054999f, 0.01196580f, 0.01365587f,\n    0.01558805f, 0.01773015f, 0.02005001f, 
0.02251136f,\n    0.02520288f, 0.02827972f, 0.03189704f, 0.03621000f,\n    0.04143771f, 0.04750372f, 0.05411988f, 0.06099803f,\n    0.06785001f, 0.07448632f, 0.08136156f, 0.08915364f,\n    0.09854048f, 0.1102000f, 0.1246133f, 0.1417017f,\n    0.1613035f, 0.1832568f, 0.2074000f, 0.2336921f,\n    0.2626114f, 0.2947746f, 0.3307985f, 0.3713000f,\n    0.4162091f, 0.4654642f, 0.5196948f, 0.5795303f,\n    0.6456000f, 0.7184838f, 0.7967133f, 0.8778459f,\n    0.9594390f, 1.0390501f, 1.1153673f, 1.1884971f,\n    1.2581233f, 1.3239296f, 1.3856000f, 1.4426352f,\n    1.4948035f, 1.5421903f, 1.5848807f, 1.6229600f,\n    1.6564048f, 1.6852959f, 1.7098745f, 1.7303821f,\n    1.7470600f, 1.7600446f, 1.7696233f, 1.7762637f,\n    1.7804334f, 1.7826000f, 1.7829682f, 1.7816998f,\n    1.7791982f, 1.7758671f, 1.7721100f, 1.7682589f,\n    1.7640390f, 1.7589438f, 1.7524663f, 1.7441000f,\n    1.7335595f, 1.7208581f, 1.7059369f, 1.6887372f,\n    1.6692000f, 1.6475287f, 1.6234127f, 1.5960223f,\n    1.5645280f, 1.5281000f, 1.4861114f, 1.4395215f,\n    1.3898799f, 1.3387362f, 1.2876400f, 1.2374223f,\n    1.1878243f, 1.1387611f, 1.0901480f, 1.0419000f,\n    0.9941976f, 0.9473473f, 0.9014531f, 0.8566193f,\n    0.8129501f, 0.7705173f, 0.7294448f, 0.6899136f,\n    0.6521049f, 0.6162000f, 0.5823286f, 0.5504162f,\n    0.5203376f, 0.4919673f, 0.4651800f, 0.4399246f,\n    0.4161836f, 0.3938822f, 0.3729459f, 0.3533000f,\n    0.3348578f, 0.3175521f, 0.3013375f, 0.2861686f,\n    0.2720000f, 0.2588171f, 0.2464838f, 0.2347718f,\n    0.2234533f, 0.2123000f, 0.2011692f, 0.1901196f,\n    0.1792254f, 0.1685608f, 0.1582000f, 0.1481383f,\n    0.1383758f, 0.1289942f, 0.1200751f, 0.1117000f,\n    0.1039048f, 0.09666748f, 0.08998272f, 0.08384531f,\n    0.07824999f, 0.07320899f, 0.06867816f, 0.06456784f,\n    0.06078835f, 0.05725001f, 0.05390435f, 0.05074664f,\n    0.04775276f, 0.04489859f, 0.04216000f, 0.03950728f,\n    0.03693564f, 0.03445836f, 0.03208872f, 0.02984000f,\n    0.02771181f, 0.02569444f, 0.02378716f, 
0.02198925f,\n    0.02030000f, 0.01871805f, 0.01724036f, 0.01586364f,\n    0.01458461f, 0.01340000f, 0.01230723f, 0.01130188f,\n    0.01037792f, 0.009529306f, 0.008749999f, 0.008035200f,\n    0.007381600f, 0.006785400f, 0.006242800f, 0.005749999f,\n    0.005303600f, 0.004899800f, 0.004534200f, 0.004202400f,\n    0.003900000f, 0.003623200f, 0.003370600f, 0.003141400f,\n    0.002934800f, 0.002749999f, 0.002585200f, 0.002438600f,\n    0.002309400f, 0.002196800f, 0.002100000f, 0.002017733f,\n    0.001948200f, 0.001889800f, 0.001840933f, 0.001800000f,\n    0.001766267f, 0.001737800f, 0.001711200f, 0.001683067f,\n    0.001650001f, 0.001610133f, 0.001564400f, 0.001513600f,\n    0.001458533f, 0.001400000f, 0.001336667f, 0.001270000f,\n    0.001205000f, 0.001146667f, 0.001100000f, 0.001068800f,\n    0.001049400f, 0.001035600f, 0.001021200f, 0.001000000f,\n    0.0009686400f, 0.0009299200f, 0.0008868800f, 0.0008425600f,\n    0.0008000000f, 0.0007609600f, 0.0007236800f, 0.0006859200f,\n    0.0006454400f, 0.0006000000f, 0.0005478667f, 0.0004916000f,\n    0.0004354000f, 0.0003834667f, 0.0003400000f, 0.0003072533f,\n    0.0002831600f, 0.0002654400f, 0.0002518133f, 0.0002400000f,\n    0.0002295467f, 0.0002206400f, 0.0002119600f, 0.0002021867f,\n    0.0001900000f, 0.0001742133f, 0.0001556400f, 0.0001359600f,\n    0.0001168533f, 0.0001000000f, 0.00008613333f, 0.00007460000f,\n    0.00006500000f, 0.00005693333f, 0.00004999999f, 0.00004416000f,\n    0.00003948000f, 0.00003572000f, 0.00003264000f, 0.00003000000f,\n    0.00002765333f, 0.00002556000f, 0.00002364000f, 0.00002181333f,\n    0.00002000000f, 0.00001813333f, 0.00001620000f, 0.00001420000f,\n    0.00001213333f, 0.00001000000f, 0.000007733333f, 0.000005400000f,\n    0.000003200000f, 0.000001333333f, 0.000000000000f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    
0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f, 0.0f,\n    0.0f, 0.0f, 0.0f\n};\n\n#define D65_Samples 531\n#define D65_MIN 300\n#define D65_MAX (D65_MIN + D65_Samples - 1)\n\nconst float D65_SPD[] = {\n    0.0341f,  0.36014f, 0.68618f, 1.01222f, 1.33826f, 1.6643f,  1.99034f, 2.31638f, 2.64242f, 2.96846f, 3.2945f,  4.98865f, 6.6828f,\n    8.37695f, 10.0711f, 11.7652f, 13.4594f, 15.1535f, 16.8477f, 18.5418f, 20.236f,  21.9177f, 23.5995f, 25.2812f, 26.963f,  28.6447f,\n    30.3265f, 32.0082f, 33.69f,   35.3717f, 37.0535f, 37.343f,  37.6326f, 37.9221f, 38.2116f, 38.5011f, 38.7907f, 39.0802f, 39.3697f,\n    39.6593f, 39.9488f, 40.4451f, 40.9414f, 41.4377f, 41.934f,  42.4302f, 42.9265f, 43.4228f, 43.9191f, 44.4154f, 44.9117f, 45.0844f,\n    45.257f,  45.4297f, 45.6023f, 45.775f,  45.9477f, 46.1203f, 46.293f,  46.4656f, 46.6383f, 47.1834f, 47.7285f, 48.2735f, 48.8186f,\n    49.3637f, 49.9088f, 50.4539f, 50.9989f, 51.544f,  52.0891f, 51.8777f, 51.6664f, 51.455f,  51.2437f, 
51.0323f, 50.8209f, 50.6096f,\n    50.3982f, 50.1869f, 49.9755f, 50.4428f, 50.91f,   51.3773f, 51.8446f, 52.3118f, 52.7791f, 53.2464f, 53.7137f, 54.1809f, 54.6482f,\n    57.4589f, 60.2695f, 63.0802f, 65.8909f, 68.7015f, 71.5122f, 74.3229f, 77.1336f, 79.9442f, 82.7549f, 83.628f,  84.5011f, 85.3742f,\n    86.2473f, 87.1204f, 87.9936f, 88.8667f, 89.7398f, 90.6129f, 91.486f,  91.6806f, 91.8752f, 92.0697f, 92.2643f, 92.4589f, 92.6535f,\n    92.8481f, 93.0426f, 93.2372f, 93.4318f, 92.7568f, 92.0819f, 91.4069f, 90.732f,  90.057f,  89.3821f, 88.7071f, 88.0322f, 87.3572f,\n    86.6823f, 88.5006f, 90.3188f, 92.1371f, 93.9554f, 95.7736f, 97.5919f, 99.4102f, 101.228f, 103.047f, 104.865f, 106.079f, 107.294f,\n    108.508f, 109.722f, 110.936f, 112.151f, 113.365f, 114.579f, 115.794f, 117.008f, 117.088f, 117.169f, 117.249f, 117.33f,  117.41f,\n    117.49f,  117.571f, 117.651f, 117.732f, 117.812f, 117.517f, 117.222f, 116.927f, 116.632f, 116.336f, 116.041f, 115.746f, 115.451f,\n    115.156f, 114.861f, 114.967f, 115.073f, 115.18f,  115.286f, 115.392f, 115.498f, 115.604f, 115.711f, 115.817f, 115.923f, 115.212f,\n    114.501f, 113.789f, 113.078f, 112.367f, 111.656f, 110.945f, 110.233f, 109.522f, 108.811f, 108.865f, 108.92f,  108.974f, 109.028f,\n    109.082f, 109.137f, 109.191f, 109.245f, 109.3f,   109.354f, 109.199f, 109.044f, 108.888f, 108.733f, 108.578f, 108.423f, 108.268f,\n    108.112f, 107.957f, 107.802f, 107.501f, 107.2f,   106.898f, 106.597f, 106.296f, 105.995f, 105.694f, 105.392f, 105.091f, 104.79f,\n    105.08f,  105.37f,  105.66f,  105.95f,  106.239f, 106.529f, 106.819f, 107.109f, 107.399f, 107.689f, 107.361f, 107.032f, 106.704f,\n    106.375f, 106.047f, 105.719f, 105.39f,  105.062f, 104.733f, 104.405f, 104.369f, 104.333f, 104.297f, 104.261f, 104.225f, 104.19f,\n    104.154f, 104.118f, 104.082f, 104.046f, 103.641f, 103.237f, 102.832f, 102.428f, 102.023f, 101.618f, 101.214f, 100.809f, 100.405f,\n    100.0f,     99.6334f, 99.2668f, 98.9003f, 98.5337f, 98.1671f, 97.8005f, 
97.4339f, 97.0674f, 96.7008f, 96.3342f, 96.2796f, 96.225f,\n    96.1703f, 96.1157f, 96.0611f, 96.0065f, 95.9519f, 95.8972f, 95.8426f, 95.788f,  95.0778f, 94.3675f, 93.6573f, 92.947f,  92.2368f,\n    91.5266f, 90.8163f, 90.1061f, 89.3958f, 88.6856f, 88.8177f, 88.9497f, 89.0818f, 89.2138f, 89.3459f, 89.478f,  89.61f,   89.7421f,\n    89.8741f, 90.0062f, 89.9655f, 89.9248f, 89.8841f, 89.8434f, 89.8026f, 89.7619f, 89.7212f, 89.6805f, 89.6398f, 89.5991f, 89.4091f,\n    89.219f,  89.029f,  88.8389f, 88.6489f, 88.4589f, 88.2688f, 88.0788f, 87.8887f, 87.6987f, 87.2577f, 86.8167f, 86.3757f, 85.9347f,\n    85.4936f, 85.0526f, 84.6116f, 84.1706f, 83.7296f, 83.2886f, 83.3297f, 83.3707f, 83.4118f, 83.4528f, 83.4939f, 83.535f,  83.576f,\n    83.6171f, 83.6581f, 83.6992f, 83.332f,  82.9647f, 82.5975f, 82.2302f, 81.863f,  81.4958f, 81.1285f, 80.7613f, 80.394f,  80.0268f,\n    80.0456f, 80.0644f, 80.0831f, 80.1019f, 80.1207f, 80.1395f, 80.1583f, 80.177f,  80.1958f, 80.2146f, 80.4209f, 80.6272f, 80.8336f,\n    81.0399f, 81.2462f, 81.4525f, 81.6588f, 81.8652f, 82.0715f, 82.2778f, 81.8784f, 81.4791f, 81.0797f, 80.6804f, 80.281f,  79.8816f,\n    79.4823f, 79.0829f, 78.6836f, 78.2842f, 77.4279f, 76.5716f, 75.7153f, 74.859f,  74.0027f, 73.1465f, 72.2902f, 71.4339f, 70.5776f,\n    69.7213f, 69.9101f, 70.0989f, 70.2876f, 70.4764f, 70.6652f, 70.854f,  71.0428f, 71.2315f, 71.4203f, 71.6091f, 71.8831f, 72.1571f,\n    72.4311f, 72.7051f, 72.979f,  73.253f,  73.527f,  73.801f,  74.075f,  74.349f,  73.0745f, 71.8f,    70.5255f, 69.251f,  67.9765f,\n    66.702f,  65.4275f, 64.153f,  62.8785f, 61.604f,  62.4322f, 63.2603f, 64.0885f, 64.9166f, 65.7448f, 66.573f,  67.4011f, 68.2293f,\n    69.0574f, 69.8856f, 70.4057f, 70.9259f, 71.446f,  71.9662f, 72.4863f, 73.0064f, 73.5266f, 74.0467f, 74.5669f, 75.087f,  73.9376f,\n    72.7881f, 71.6387f, 70.4893f, 69.3398f, 68.1904f, 67.041f,  65.8916f, 64.7421f, 63.5927f, 61.8752f, 60.1578f, 58.4403f, 56.7229f,\n    55.0054f, 53.288f,  51.5705f, 49.8531f, 
48.1356f, 46.4182f, 48.4569f, 50.4956f, 52.5344f, 54.5731f, 56.6118f, 58.6505f, 60.6892f,\n    62.728f,  64.7667f, 66.8054f, 66.4631f, 66.1209f, 65.7786f, 65.4364f, 65.0941f, 64.7518f, 64.4096f, 64.0673f, 63.7251f, 63.3828f,\n    63.4749f, 63.567f,  63.6592f, 63.7513f, 63.8434f, 63.9355f, 64.0276f, 64.1198f, 64.2119f, 64.304f,  63.8188f, 63.3336f, 62.8484f,\n    62.3632f, 61.8779f, 61.3927f, 60.9075f, 60.4223f, 59.9371f, 59.4519f, 58.7026f, 57.9533f, 57.204f,  56.4547f, 55.7054f, 54.9562f,\n    54.2069f, 53.4576f, 52.7083f, 51.959f,  52.5072f, 53.0553f, 53.6035f, 54.1516f, 54.6998f, 55.248f,  55.7961f, 56.3443f, 56.8924f,\n    57.4406f, 57.7278f, 58.015f,  58.3022f, 58.5894f, 58.8765f, 59.1637f, 59.4509f, 59.7381f, 60.0253f, 60.3125f};\n\n/**\n * Converts an XYZ value to the sRGB color space (linear, no gamma correction is applied)\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F XYZ_to_sRGB(const ColorRGB32F& XYZ)\n{\n    /**\n     * Reference: https://en.wikipedia.org/wiki/SRGB#Correspondence_to_CIE_XYZ_stimulus\n     */\n    float r = 3.240479f * XYZ[0] + -1.537150f * XYZ[1] + -0.498535f * XYZ[2];\n    float g = -0.969256f * XYZ[0] + 1.875991f * XYZ[1] + 0.041556f * XYZ[2];\n    float b = 0.055648f * XYZ[0] + -0.204043f * XYZ[1] + 1.057311f * XYZ[2];\n\n    return ColorRGB32F(r, g, b);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F wavelength_to_XYZ(float wavelength)\n{\n    ColorRGB32F XYZ;\n\n    float index_float = wavelength - CIE_1931_MIN;\n    int index_low = hippt::max((int)index_float - 1, 0);\n    int index_high = hippt::min(index_low + 1, CIE_1931_samples - 1);\n    float t = wavelength - (int)wavelength;\n\n    float x1 = CIE_X_entries[index_low];\n    float x2 = CIE_X_entries[index_high];\n    float y1 = CIE_Y_entries[index_low];\n    float y2 = CIE_Y_entries[index_high];\n    float z1 = CIE_Z_entries[index_low];\n    float z2 = CIE_Z_entries[index_high];\n\n    XYZ.r = hippt::lerp(x1, x2, t);\n    XYZ.g = hippt::lerp(y1, y2, t);\n    XYZ.b = 
hippt::lerp(z1, z2, t);\n\n    // Now scaling by the intensity of the D65 illuminant (which is the point of sRGB)\n    int wavelength_index = hippt::min((int)roundf(wavelength), D65_MAX);\n\n    float SPD = D65_SPD[wavelength_index - D65_MIN];\n    XYZ *= SPD;\n    // Average intensity of the D65 illuminant over its wavelengths\n    XYZ /= 22.2175f;\n\n    return XYZ;\n}\n\n/**\n * Returns the RGB color of a given wavelength using the CIE 1931 2° Observer CMFs\n * to get the wavelength to XYZ.\n * \n * The XYZ value of the wavelength is then multiplied by the intensity of the D65\n * illuminant at that wavelength.\n * \n * That scaled XYZ value is then brought to the sRGB color space (linear sRGB, this is not gamma corrected)\n * using the matrix readily available on Wikipedia: https://en.wikipedia.org/wiki/SRGB#Correspondence_to_CIE_XYZ_stimulus\n * \n * The resulting RGB value is clamped to 0 to avoid negative values and then that clamped RGB\n * is then normalized such that the average of the clamped RGB values of wavelengths\n * between 360 and 830 is RGB(1.0f, 1.0f, 1.0f)\n * \n * For rendering purposes, we should only clamp the average of the non-clamped RGB values. We should not\n * clamp the individual RGB values themselves. But I found that this introduced so instabilities + we then\n * have to handle negative values in the system so that's annoying. \n * \n * So instead we're clamping the individual values but because that's kind of incorrect practice, we end\n * up we an average of RGB values that is not RGB(1.0f, 1.0f, 1.0f). 
So that's why we're\n * normalizing with the 'scale' factor in the function to bring that average back to RGB(1.0f, 1.0f, 1.0f)\n * \n * This normalization trick actually is imperceptible in practice so I guess it's fine and convenient\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F wavelength_to_RGB_clamped(float wavelength)\n{\n    const ColorRGB32F scale = ColorRGB32F(1.4979f, 1.13591f, 1.13159f);\n    ColorRGB32F RGB = XYZ_to_sRGB(wavelength_to_XYZ(wavelength));\n    RGB.clamp(0, 1.0e35f);\n\n    return RGB / scale;\n}\n\n#endif\n\n/**\n * Fitted curves for converting a wavelength to its RGB values.\n * \n * This is actually a fit of the 'wavelength_to_RGB_clamped()' such that\n * we can get the same results as this function but without the need for lookup tables\n * \n * This function takes wavelengths between 360 and 830nm and returns RGB values such that\n * the average of the RGB values of all wavelengths is RGB(1.0f, 1.0f, 1.0f).\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F wavelength_to_RGB_fit(float wavelength)\n{\n    ColorRGB32F RGB;\n\n    if( wavelength < 463.0f)\n    {\n        RGB.r = -1.2776028240727566e-01f / (1.0f + exp((wavelength - 4.2680623367293401e+02f) / 8.2197460736637176e+00f)) + -1.3925673552505122e-11f * exp((wavelength - 45.0f) / 1.8175459086411596e+01f);\n        RGB.r += 1.2898689750552100e-01f;\n    }\n    else if (wavelength > 553.0f)\n    {\n        RGB.r = 1.7963649137825513e+01f * ( 1.0f / 2.6577826611702449e+01f) * exp(-0.5f * hippt::square((wavelength - 6.0625724092824566e+02f) * (1.0f / 2.6577826611702449e+01f)));\n        RGB.r += 2.5574660155104657e-03f;\n    }\n    else\n        RGB.r = 0.0f;\n\n    RGB.g = 3.4962267376163049e+02f * expf(-0.5f * hippt::square((wavelength-5.4209217455705152e+02f) / -2.9598170255834638e+01f));\n    RGB.g /= wavelength;\n    RGB.b = exp(3.2987659944421112e+03f + (-2.0975839709372405e+05f / wavelength) -4.6368268395094020e+02f * logf(wavelength));\n\n    // The fitting process was done 
with scaled data so the data is actually\n    // fitted such that the average RGB colors of all wavelength is 0.1. But we want 1.\n    // So we multiply by 10.\n    return RGB * 10.0f;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F wavelength_to_RGB(float wavelength)\n{\n#if WavelengthToRGBMethod == WAVELENGTH_TO_RGB_FIT\n    return wavelength_to_RGB_fit(wavelength);\n#elif WavelengthToRGBMethod == WAVELENGTH_TO_RGB_TABLES\n    return wavelength_to_RGB_clamped(wavelength);\n#endif\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float sample_wavelength_uniformly(Xorshift32Generator& random_number_generator)\n{\n\tfloat r = random_number_generator();\n\n    return r * (MAX_SAMPLE_WAVELENGTH - MIN_SAMPLE_WAVELENGTH) + MIN_SAMPLE_WAVELENGTH;\n}\n\n/**\n * Reference:\n * [1] [Open PBR Specification] https://academysoftwarefoundation.github.io/OpenPBR/#model/basesubstrate/translucentbase\n * \n * Given the dispersion parameters of material and a base IOR\n * (assumed to be the IOR of the material measured at 587.6nm),\n * returns the new IOR of the material but as if measured at the given\n * 'wavelength'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float compute_dispersion_ior(float dispersion_abbe_number, float dispersion_scale, float base_IOR, float wavelength)\n{\n    if (dispersion_scale == 0.0f)\n        return base_IOR;\n\n#define SQUARE_587_6 334777.96f // 587.6^2\n#define POW_MIN2_LAMBDA_F_MINUS_LAMBDA_C 0.00000191038851931481f // 486.1^(-2) - 656.3^(-2)\n\n    float abbe_number = dispersion_abbe_number / dispersion_scale;\n\n    float B = (base_IOR - 1.0f) / (abbe_number * POW_MIN2_LAMBDA_F_MINUS_LAMBDA_C);\n    float A = base_IOR - B / (SQUARE_587_6);\n\n    return A + B / (wavelength * wavelength);\n}\n\n/**\n * Essentially returns the RGB color associated with a wavelength.\n * \n * Only returns the color if the given 'wavelength' is negative.\n * If the wavelength passed is negative, it is negated so that it becomes\n * positive (hence the passing by reference)\n * \n * If 
the wavelength is positive, this implicitely means that the wavelength\n * throughput filter has already been applied to the ray and should not\n * be applied a second time.\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F get_dispersion_ray_color(float& wavelength, float dispersion_scale)\n{\n    if (dispersion_scale == 0.0f)\n        // No dispersion\n        return ColorRGB32F(1.0f);\n\n    if (wavelength >= 0.0f)\n        // Wavelength isn't negative, dispersion wavelength throughput filter\n        // has already been applied\n        return ColorRGB32F(1.0f);\n\n    wavelength *= -1.0f;\n    return wavelength_to_RGB(wavelength);\n}\n\n/**\n * Below are some utility functions that were used to generate the fit of 'wavelength_to_RGB_fit',\n * verify the implementation etc...\n */\n#ifndef __KERNELCC__\n\n#include \"Image/Image.h\"\n\n/**\n * Write CSV files for the R, G and B values at each wavelength.\n * Used for fitting curves\n */\nstatic void write_wavelength_to_RGB_data_to_file()\n{\n    const int nb_samples = 10000;\n\n    std::ofstream R(\"wavelength_to_rgb_R_\" + std::to_string(nb_samples) + \".csv\");\n    std::ofstream G(\"wavelength_to_rgb_G_\" + std::to_string(nb_samples) + \".csv\");\n    std::ofstream B(\"wavelength_to_rgb_B_\" + std::to_string(nb_samples) + \".csv\");\n    float Y_sum = 0.0f;\n    for (int i = 0; i < nb_samples; i++)\n    {\n        float wavelength = MIN_SAMPLE_WAVELENGTH + i * (MAX_SAMPLE_WAVELENGTH - MIN_SAMPLE_WAVELENGTH) / static_cast<float>(nb_samples);\n\n        ColorRGB32F RGB = wavelength_to_RGB(wavelength);\n\n        R << wavelength << \", \" << RGB.r << std::endl;\n        G << wavelength << \", \" << RGB.g << std::endl;\n        B << wavelength << \", \" << RGB.b << std::endl;\n    }\n}\n\n/**\n * Prints the average of the RGB value of all wavelengths.\n * This should always output (1.0f, 1.0f, 1.0f)\n */\nstatic void average_RGB_for_render()\n{\n    ColorRGB32F average_RGB;\n\n    const int  nb_samples = 
1000000;\n    for (int i = 0; i < nb_samples; i++)\n    {\n        float wavelength = MIN_SAMPLE_WAVELENGTH + i * (MAX_SAMPLE_WAVELENGTH - MIN_SAMPLE_WAVELENGTH) / static_cast<float>(nb_samples);\n\n        average_RGB += wavelength_to_RGB(wavelength) / nb_samples;\n    }\n\n    std::cout << \"Average RGB of all wavelengths for rendering: \" << average_RGB << std::endl;\n}\n\n/**\n * Computes the RGB values of all wavelengths and write that to a file, producing a rainbow\n * image.\n */\nstatic void write_rainbow_to_file()\n{\n    const int width = 1280;\n    const int height = 720;\n    Image32Bit rainbow(width, height, 3);\n\n    for (int x = 0; x < width; x++)\n    {\n        float t = x / static_cast<float>(width - 1);\n        float wavelength = t * (MAX_SAMPLE_WAVELENGTH - MIN_SAMPLE_WAVELENGTH) + MIN_SAMPLE_WAVELENGTH;\n\n        ColorRGB32F RGB = wavelength_to_RGB(wavelength);\n\n        for (int y = 0; y < height; y++)\n        {\n            rainbow[(y * width + x) * 3 + 0] = RGB.r;\n            rainbow[(y * width + x) * 3 + 1] = RGB.g;\n            rainbow[(y * width + x) * 3 + 2] = RGB.b;\n        }\n    }\n\n    rainbow.write_image_png(\"rainbow.png\");\n}\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/FixIntellisense.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_FIX_INTELLISENSE_H\n#define DEVICE_FIX_INTELLISENSE_H\n\n/*\n * All that is in this file is meant to make Visual Studio's intellisense happy\n * in the kernel code so that we have autocompletion and no \n * red-underlined-stinky-disgusting intellisense error telling us that no, M_PI is not\n * defined (even though it is at compile time for the GPU) etc... blah blah blah\n */\n\n// The HIPRT_KERNEL_SIGNATURE is only useful to help Visual Studio's Intellisense\n// Without this macro, all kernel functions would be declared as:\n// extern \"C\" void __global__ my_function(......)\n// but Visual Studio doesn't like the 'extern \"C\" void __global__' part and it\n// breaks code coloration and autocompletion. It is however required for the shader\n// compiler\n// To circumvent this problem, we're only declaring the functions 'void' when in the text editor\n// (when __KERNELCC__ is not defined) and we're correctly declaring the function with the full\n// attributes when it's the shader compiler processing the function (when __KERNELCC__ is defined)\n// We're also defining blockDim, blockIdx and threadIdx because they are udefined otherwise...\n#ifdef __KERNELCC__\n\n#define GLOBAL_KERNEL_SIGNATURE(returnType) extern \"C\" returnType __global__\n#define DEVICE_KERNEL_SIGNATURE(returnType) extern \"C\" returnType __device__\n\n#else\n\nstruct dummyVec3\n{\n    int x, y, z;\n};\n\nstatic dummyVec3 blockDim, blockIdx, threadIdx, gridDim;\n\n#define GLOBAL_KERNEL_SIGNATURE(returnType) returnType\n#define DEVICE_KERNEL_SIGNATURE(returnType) returnType\n#define __shared__\n#define __restrict__\n\n// TODO move all of this in Math.h\ninline void __syncthreads() {}\ninline void __syncwarp() {}\ninline unsigned int __activemask() { return 1;  }\ninline unsigned int __ballot() { return 1; }\n\n// For using printf in 
Kernels\n#include <stdio.h>\n#endif // #ifdef __KERNELCC__\n\n#if defined(__KERNELCC__) // GPU\n#define GPU_CPU_ALIGN(n) __align__(n)\n#elif defined(__GNUC__) // GCC\n#define GPU_CPU_ALIGN(n) __attribute__((aligned(n)))\n#elif defined(_MSC_VER) // MSVC\n#define GPU_CPU_ALIGN(n) __declspec(align(n))\n#else\n#error \"Please provide a definition for GPU_CPU_ALIGN macro for your host compiler!\"\n#endif\n\n#endif // FIX_INTELISSENSE_H\n"
  },
  {
    "path": "src/Device/includes/Fresnel.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_FRESNEL_H\n#define DEVICE_FRESNEL_H\n\n#include \"HostDeviceCommon/Color.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float F0_from_eta(float eta_t, float eta_i)\n{\n    float nume_F0 = (eta_t - eta_i);\n    float denom_F0 = (eta_t + eta_i);\n    float F0 = (nume_F0 * nume_F0) / (denom_F0 * denom_F0);\n\n    return F0;\n}\n\n/**\n * relative_eta here is eta_t / eta_i\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float F0_from_eta_t_and_relative_ior(float eta_t, float relative_eta)\n{\n    return F0_from_eta(eta_t, /* eta_i */ eta_t / relative_eta);\n}\n\n/**\n * Schlick's approximation for dielectric fresnel reflectance\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F fresnel_schlick(ColorRGB32F F0, float angle)\n{\n    return F0 + (ColorRGB32F(1.0f) - F0) * hippt::pow_5(1.0f - angle);\n}\n\n/**\n * Full reflectance fresnel dielectric formula\n *\n * 'relative_eta' is eta_t / eta_i = transmitted media IOR / incident media IOR\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float full_fresnel_dielectric(float cos_theta_i, float relative_eta)\n{\n    if (hippt::abs(1.0f - relative_eta) < 1.0e-4f)\n        // relative_eta of 1, no fresnel\n        return 0.0f;\n\n    // Computing cos_theta_t\n    float sin_theta_i2 = 1.0f - cos_theta_i * cos_theta_i;\n    float sin_theta_t2 = sin_theta_i2 / (relative_eta * relative_eta);\n\n    if (sin_theta_t2 >= 1.0f)\n        // Total internal reflection, 0% refraction, all reflection\n        return 1.0f;\n\n    float cos_theta_t = sqrt(1.0f - sin_theta_t2);\n    float r_parallel = (relative_eta * cos_theta_i - cos_theta_t) / (relative_eta * cos_theta_i + cos_theta_t);\n    float r_perpendicular = (cos_theta_i - relative_eta * cos_theta_t) / (cos_theta_i + relative_eta * cos_theta_t);\n    return (r_parallel * r_parallel + r_perpendicular * r_perpendicular) / 2;\n}\n\n/**\n * Override of 
full_fresnel_dielectric with two separate eta\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float full_fresnel_dielectric(float cos_theta_i, float eta_i, float eta_t)\n{\n    return full_fresnel_dielectric(cos_theta_i, eta_t / eta_i);\n}\n\n/**\n * Computes the reflectance at normal incidence from the two\n * given eta and uses that reflectance to compute the dielectric\n * fresnel reflectance using schlick's approximation\n *\n * This function is basically a shorthand for:\n *      ColorRGB32F F0 = <compute F0 from etas>\n *      return schlick(F0, NoL)\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F fresnel_schlick_from_ior(float eta_i, float eta_t, float cos_theta_i)\n{\n    float F0 = F0_from_eta(eta_t, eta_i);\n\n    return fresnel_schlick(ColorRGB32F(F0), cos_theta_i);\n}\n\n/**\n * Overload with normal and light direction\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F fresnel_schlick_from_ior(float eta_i, float eta_t, const float3& normal, const float3& local_to_light_direction)\n{\n    float NoL = hippt::clamp(1.0e-8f, 1.0f, hippt::dot(normal, local_to_light_direction));\n\n    return fresnel_schlick_from_ior(eta_i, eta_t, NoL);\n}\n\n/**\n * Implementation of [Artist Friendly Metallic Fresnel, Gulbrandsen, 2014] for\n * computing the complex index of refraction of metals from two intuitive color parameters\n * 'reflectivity' and 'edge_tint'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F gulbrandsen_metallic_complex_fresnel(const ColorRGB32F& reflectivity, const ColorRGB32F& edge_tint, float cos_theta_i)\n{\n    // TODO we should precompute k and n on the CPU from 'reflectivity' and 'edge_tint'\n\n\n    // Computing n and k from the 'reflectivity' and 'edge_tint' artist parameters\n    ColorRGB32F one = ColorRGB32F(1.0f);\n    ColorRGB32F sqrt_r = sqrt(reflectivity);\n    ColorRGB32F left_n = edge_tint * ((one - reflectivity) / (one + reflectivity));\n    ColorRGB32F right_n = (one - edge_tint) * ((one + sqrt_r) / (one - sqrt_r));\n    ColorRGB32F n = left_n + 
right_n;\n\n    ColorRGB32F k_left = n + one;\n    k_left *= k_left;\n    k_left *= reflectivity;\n    ColorRGB32F k_right = n - one;\n    k_right *= k_right;\n    ColorRGB32F k_sqr = (k_left - k_right) / (one - reflectivity);\n\n    // Computing the approximation for non polarized light based on Rs and Rp\n    // for the perpendicular and parallel components of the light\n    ColorRGB32F Rs_nume = n * n + k_sqr - 2.0f * n * cos_theta_i + ColorRGB32F(cos_theta_i * cos_theta_i);\n    ColorRGB32F Rs_denom = n * n + k_sqr + 2.0f * n * cos_theta_i + ColorRGB32F(cos_theta_i * cos_theta_i);\n    ColorRGB32F Rs = Rs_nume / Rs_denom;\n\n    ColorRGB32F Rp_nume = (n * n + k_sqr) * cos_theta_i * cos_theta_i - 2.0f * n * cos_theta_i + one;\n    ColorRGB32F Rp_denom = (n * n + k_sqr) * cos_theta_i * cos_theta_i + 2.0f * n * cos_theta_i + one;\n    ColorRGB32F Rp = Rp_nume / Rp_denom;\n\n    return 0.5f * (Rs + Rp);\n}\n\n/**\n * Reference:\n *\n * [1] [Generalization of Adobe's Fresnel Model, Hoffman, 2023]\n * [2] [Adobe Standard Material, Technical Documentation, Kutz, Hasan, Edmondson]\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F adobe_f82_tint_fresnel(const ColorRGB32F& F0, const ColorRGB32F& F82, const ColorRGB32F& F90, float F90_falloff_exponent, float cos_theta)\n{\n    ColorRGB32F base_term = F0 + (F90 - F0) * pow(1.0f - cos_theta, F90_falloff_exponent);\n    if (base_term.max_component() < 1.0e-8f)\n        // Quick exit if the base term is super low to avoid numerical issues with super low\n        // float numbers\n        return ColorRGB32F(0.0f);\n\n    float lazanyi_correction = cos_theta * hippt::pow_6(1.0f - cos_theta);\n\n    // cos_theta_max for beta exponent = 6 in the lazanyi correction term\n    constexpr float cos_theta_max = 1.0f / 7.0f;\n    constexpr float denom_a = cos_theta_max * hippt::pow_6(1.0f - cos_theta_max);\n\n    ColorRGB32F nume_a = (F0 + (F90 - F0) * pow(1.0f - cos_theta_max, F90_falloff_exponent)) * (ColorRGB32F(1.0f) - F82);\n    
ColorRGB32F a = nume_a / denom_a;\n\n    ColorRGB32F F = base_term - a * lazanyi_correction;\n    F.clamp(0.0f, 1.0f);\n\n    return F;\n}\n\n/**\n * Reference:\n * [1] [A Hitchhiker's Guide to Multiple Scattering, Eugene d'Eon] https://eugenedeon.com/hitchhikers\n * Eq. 11.15\n * \n * Hemispherical albedo (integral of directional albedos over view directions) \n * of a perfectly smooth dielectric layer. This is an approximated fit.\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float fresnel_hemispherical_albedo_fit(float relative_eta)\n{\n    return logf((10893.0f * relative_eta - 1438.2f) / (-774.4f * hippt::square(relative_eta) + 10212.0f * relative_eta + 1.0f));\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/GBufferDevice.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GBUFFER_DEVICE_H\n#define GBUFFER_DEVICE_H\n\n#include \"Device/includes/RayVolumeState.h\"\n\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n\n// Structure of arrays for the data contained in the pixels of the GBuffer\n// \n// If you want the roughness of the pixel (X, Y) = [50, 0] for example,\n// get it at materials[50].get_roughness()\nstruct GBufferDevice\n{\n\tHIPRT_HOST_DEVICE float3 get_view_direction(float3 camera_position, int pixel_index) const\n\t{\n\t\treturn hippt::normalize(camera_position - primary_hit_position[pixel_index]);\n\t}\n\n\tDevicePackedEffectiveMaterial* materials = nullptr;\n\n\tint* first_hit_prim_index = nullptr;\n\tfloat3* primary_hit_position = nullptr;\n\n\t// We need both normals to correct the black fringes from the microfacet\n\t// model when used with smooth normals / normal mapping\n\tOctahedral24BitNormalPadded32b* shading_normals = nullptr;\n\tOctahedral24BitNormalPadded32b* geometric_normals = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/GMoN/GMoN.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_GMON_H\n#define DEVICE_INCLUDES_GMON_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/GMoN/GMoNMeansRadixSort.h\"\n#include \"Device/includes/GMoN/GMoNDevice.h\"\n#include \"HostDeviceCommon/Color.h\"\n\n// A bunch of macros here to streamline the code between the CPU and GPU\n#ifdef __KERNELCC__\n\n#define SORTED_MEANS_VARIABLE\n#define SORTED_MEANS_VARIABLE_WITH_COMMA\n// Nothing to declare for the sorted means: on the GPU the sorted means are in shared memory, already declared in 'GMoNMeansRadixSort'\n#define SORTED_MEANS_DECLARATION\n#define SORTED_MEANS_DECLARATION_WITH_COMMA\n#define SORTED_MEANS_ASSIGNATION(x) x\n// Getting the sorted mean of index 'mean_index' (in shared memory on the GPU)\n#define SORTED_MEANS_FETCH(mean_index) scratch_memory[SCRATCH_MEMORY_INDEX(0, (mean_index))]\n#define SORTED_INDEX_FETCH(set_index) (sorted_keys[SORTED_KEYS_INDEX(set_index)] & 0xFF)\n\n#else\n\n// Just a macro for the name of the sorted means std::vector\n#define SORTED_MEANS_VARIABLE sorted_means\n#define SORTED_MEANS_VARIABLE_WITH_COMMA ,sorted_means\n// On the CPU, the sorted means are in a std::vector\n#define SORTED_MEANS_DECLARATION std::pair<std::vector<unsigned int>, std::vector<unsigned short int>> SORTED_MEANS_VARIABLE\n#define SORTED_MEANS_DECLARATION_WITH_COMMA ,std::pair<std::vector<unsigned int>, std::vector<unsigned short int>> SORTED_MEANS_VARIABLE\n// Assigning to the sorted means vector\n#define SORTED_MEANS_ASSIGNATION(x) SORTED_MEANS_VARIABLE = (x)\n// Getting the sorted mean of index 'mean_index' (in the 'sorted_means' std::vector on the CPU)\n#define SORTED_MEANS_FETCH(mean_index) SORTED_MEANS_VARIABLE.first[(mean_index)]\n#define SORTED_INDEX_FETCH(set_index) (SORTED_MEANS_VARIABLE.second[set_index] & 0xFF)\n\n#endif\n\nHIPRT_HOST_DEVICE HIPRT_INLINE 
float compute_gini_coefficient(SORTED_MEANS_DECLARATION)\n{\n    // Applying Eq. 4 of the paper\n    float sum_of_means = 0.0f;\n    float sum_of_means_weighted = 0.0f;\n\n    for (int j = 1; j <= GMoNMSetsCount; j++)\n    {\n        unsigned int sorted_mean_uint = SORTED_MEANS_FETCH(j - 1);\n        float sorted_mean_float = *reinterpret_cast<float*>(&sorted_mean_uint);\n\n        sum_of_means += sorted_mean_float;\n        sum_of_means_weighted += j * sorted_mean_float;\n    }\n\n    float nume = 2.0f * sum_of_means_weighted;\n    float denom = GMoNMSetsCount * sum_of_means;\n\n    if (denom == 0.0f)\n        return 0.0f;\n\n    return nume / denom - static_cast<float>(GMoNMSetsCount + 1) / GMoNMSetsCount;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F get_median_of_means(GMoNDevice gmon_device, unsigned int pixel_index, int2 render_resolution SORTED_MEANS_DECLARATION_WITH_COMMA)\n{\n    // Getting the index of the set for the sorted median\n    unsigned short int median_set_index = SORTED_INDEX_FETCH(GMoNMSetsCount / 2);\n\n    return gmon_device.sets[median_set_index * render_resolution.x * render_resolution.y + pixel_index];\n}\n\n/**\n * Computes the median of means over the sets and stores the\n * result in the 'result_framebuffer' buffer. 
The result will\n * be stored scaled by the number of samples rendered by the\n * path tracer so far such that dividing the 'result_framebuffer'\n * buffer by the number of samples yields the correct color for\n * displaying in the viewport\n */\nHIPRT_HOST_DEVICE ColorRGB32F gmon_compute_median_of_means(GMoNDevice gmon_device, uint32_t pixel_index, unsigned int sample_number, int2 render_resolution)\n{\n    SORTED_MEANS_DECLARATION;\n    SORTED_MEANS_ASSIGNATION(gmon_means_radix_sort(gmon_device.sets, pixel_index, sample_number, render_resolution));\n\n    switch (gmon_device.gmon_mode)\n    {\n    case GMoNDevice::GMoNMode::MEDIAN_OF_MEANS:\n        // Multiplying by the number of sets here because (with an example):\n        //  - If we have 5 sets\n        //  - We rendered 35 samples so far\n        //  - Each set has 7 samples\n        //  - But the display shader in the viewport expects 35 samples worth of intensity in the framebuffer\n        //  - So we need to return the color (which is 7 sample-accumulated) multiplied by the number of sets\n        //      to get back our 35\n        return get_median_of_means(gmon_device, pixel_index, render_resolution SORTED_MEANS_VARIABLE_WITH_COMMA) * GMoNMSetsCount;\n\n    case GMoNDevice::GMoNMode::BINARY_GMON:\n    {\n        float gini_coefficient = compute_gini_coefficient(SORTED_MEANS_VARIABLE);\n\n        // Eq. 5 of the paper\n        if (gini_coefficient <= 0.25f)\n        {\n            // Return the mean. 
We're actually just going to return the sum of the samples and it is the shader that\n            // displays in the viewport that is going to divide that sum by the number of samples rendered so far,\n            // thus giving us the mean\n            \n            ColorRGB32F sum;\n            for (int i = 0; i < GMoNMSetsCount; i++)\n                sum += gmon_device.sets[render_resolution.x * render_resolution.y * i + pixel_index];\n\n            return sum;\n        }\n        else\n            // Return the median of means\n             \n            // Multiplying by the number of sets here because (with an example):\n            //  - If we have 5 sets\n            //  - We rendered 35 samples so far\n            //  - Each set has 7 samples\n            //  - But the display shader in the viewport expects 35 samples worth of intensity in the framebuffer\n            //  - So we need to return the color (which is 7 sample-accumulated) multiplied by the number of sets\n            //      to get back our 35\n            return get_median_of_means(gmon_device, pixel_index, render_resolution SORTED_MEANS_VARIABLE_WITH_COMMA) * GMoNMSetsCount;\n    }\n\n    case GMoNDevice::GMoNMode::ADAPTIVE_GMON:\n    {\n        // Section 4.3 and Eq. 6\n        float gini_coefficient = compute_gini_coefficient(SORTED_MEANS_VARIABLE);\n        if (gini_coefficient == 0.0f)\n            return ColorRGB32F(0.0f);\n\n        int c = gini_coefficient * (GMoNMSetsCount / 2);\n\n        // Eq. 
6\n        ColorRGB32F sum;\n        for (int i = c; i < GMoNMSetsCount - c; i++)\n            sum += gmon_device.sets[SORTED_INDEX_FETCH(i) * render_resolution.x * render_resolution.y + pixel_index];\n\n        // We want this function to return un-averaged colors such that it is\n        // the shader that displays in the viewport that does the averaging.\n        // That's why we have the multiplication by GMoNMSetsCount at the end (which isn't in the paper)\n        return sum / (GMoNMSetsCount - 2 * c) * GMoNMSetsCount;\n    }\n\n    default:\n        // We shouldn't be here, this means that the GMoNMode used isn't one of the GMoNMode enum\n        return ColorRGB32F(10000.0f, 0.0f, 10000.0f);\n    }\n \n    // We cannot be here, this would mean that the switch skipped every single case, including the default case\n    return ColorRGB32F(10000.0f, 0.0f, 0.0f);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/GMoN/GMoNDevice.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_GMON_DEVICE_H\n#define DEVICE_GMON_DEVICE_H\n\n#include \"HostDeviceCommon/Color.h\"\n\n/**\n * Data structure for the implementation of GMoN\n * \n * Reference:\n * [1] [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\n */\nstruct GMoNDevice\n{\n    enum GMoNMode\n    {\n        MEDIAN_OF_MEANS = 0,\n        BINARY_GMON = 1,\n        ADAPTIVE_GMON = 2,\n    };\n    GMoNMode gmon_mode = GMoNMode::ADAPTIVE_GMON;\n\n    // This is one very big buffer that contains all the sets we accumulate into for GMoN\n    //\n    // For example, for GMoNMSets == 5 and a render resolution of 1280x720,\n    // this is going to be a buffer that is 1280*720*5 elements long\n    ColorRGB32F* sets = nullptr;\n\n    // This is the buffer that contains the G-median of means result of each pixel and this is going\n    // to be displayed in the viewport instead of the regular framebuffer if GMoN is being used\n    ColorRGB32F* result_framebuffer = nullptr;\n\n    // Which is the next set that is going to receive the sample\n    unsigned int next_set_to_accumulate = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/GMoN/GMoNMeansRadixSort.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_GMON_RADIX_SORT_H\n#define DEVICE_GMON_RADIX_SORT_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/GMoN/GMoNMeansRadixSortHistogramDeclaration.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/KernelOptions/GMoNOptions.h\"\n#include \"HostDeviceCommon/Math.h\"\n\n// Some macros to make that single function work on the CPU and GPU\n#ifdef __KERNELCC__\n#define GMoNThreadsPerBlock (GMoNComputeMeansKernelThreadBlockSize * GMoNComputeMeansKernelThreadBlockSize)\n\n// Allocating enough shared memory for each thread to store the M keys it's going to need for sorting.\n// We multiply everything * 2 by radix sort isn't in place so we need *1 for the input buffer of keys to sort\n// and another *1 for the sorted keys\n__shared__ unsigned int scratch_memory[GMoNThreadsPerBlock * GMoNMSetsCount * 2];\n__shared__ unsigned short int sorted_keys[GMoNThreadsPerBlock * GMoNMSetsCount];\n\n#define ThreadIndex1D (threadIdx.x + threadIdx.y * blockDim.x)\n// The indexing used here tries to avoid bank conflicts\n#define SCRATCH_MEMORY_INDEX(input_buffer_index, key_index) (ThreadIndex1D + key_index * GMoNThreadsPerBlock + input_buffer_index * GMoNThreadsPerBlock * GMoNMSetsCount)\n#define SORTED_KEYS_INDEX(key_index) (ThreadIndex1D + key_index * GMoNThreadsPerBlock)\n\n#define RETURN_TYPE void\n\n#define INITIAL_STORE_KEY_IN_INPUT_BUFFER(key_index, value) scratch_memory[SCRATCH_MEMORY_INDEX(0, key_index)] = value\n\n#define READ_KEY(key_index) scratch_memory[SCRATCH_MEMORY_INDEX(input_buffer_index, key_index)]\n#define STORE_KEY(key_index, value) scratch_memory[SCRATCH_MEMORY_INDEX(!input_buffer_index, key_index)] = value\n\n#else // #ifdef __KERNELCC__\n\n#define SCRATCH_MEMORY_INDEX(input_buffer_index, key_index) (key_index)\n#define SORTED_KEYS_INDEX(key_index) 
(key_index)\n\n#define RETURN_TYPE std::pair<std::vector<unsigned int>, std::vector<unsigned short int>>\n\n#define INITIAL_STORE_KEY_IN_INPUT_BUFFER(key_index, value) keys[key_index] = value\n\n#define READ_KEY(key_index) (keys[SCRATCH_MEMORY_INDEX(42, key_index)])\n#define STORE_KEY(key_index, value) scratch_memory[SCRATCH_MEMORY_INDEX(42, key_index)] = value\n#endif\n\nHIPRT_HOST_DEVICE HIPRT_INLINE RETURN_TYPE gmon_means_radix_sort(ColorRGB32F* gmon_sets, uint32_t pixel_index, unsigned int sample_number, int2 render_resolution)\n{\n#ifndef __KERNELCC__\n\tstd::vector<unsigned int> keys_vector(GMoNMSetsCount);\n\tstd::vector<unsigned int> scratch_memory_vector(GMoNMSetsCount);\n\tstd::vector<unsigned short int> sorted_keys(GMoNMSetsCount);\n\tstd::vector<unsigned short int>& out_sorted_indices = sorted_keys;\n\n\tunsigned int* keys = keys_vector.data();\n\tunsigned int* scratch_memory = scratch_memory_vector.data();\n#else\n\tbool input_buffer_index = false;\n#endif\n\n\tconstexpr unsigned int number_of_keys = GMoNMSetsCount;\n\n\t// Loading in the input scratch memory\n\tfor (int key_index = 0; key_index < number_of_keys; key_index++)\n\t{\n\t\t// Note that this isn't actually the mean, this is just the value of the accumulated samples\n\t\t// If we wanted the mean, we would have to divide everyone by the number of samples\n\t\t// But dividing everyone by the same value isn't going to change the ordering so we don't have to do\n\t\t// that division\n\t\tfloat mean = gmon_sets[key_index * render_resolution.x * render_resolution.y + pixel_index].luminance();\n\n\t\t// Setting the means in the \"input buffer\"\n\t\tINITIAL_STORE_KEY_IN_INPUT_BUFFER(key_index, *reinterpret_cast<unsigned int*>(&mean));\n\t}\n\n\t// Initializing the sorted indices\n\t// \n\t// The sorted indices are 16 bits.\n\t// The low 8 bits are the actual sorted indices\n\t// The high 16 bits are used for internal machinery\n\t//\n\t// We only need to initialize the high bits here, the low 
bits\n\t// will be overwritten with the sorted indices\n\tfor (int i = 0; i < GMoNMSetsCount; i++)\n\t\tsorted_keys[SORTED_KEYS_INDEX(i)] = i << 8;\n\n\tfor (int digit = 0; digit < GMoNKeysNbDigitsForRadixSort; digit += GMoNSortRadixSize)\n\t{\n\t\tunsigned int radix_extraction_mask = ((1 << GMoNSortRadixSize) - 1) << digit;\n\t\tGMoNRadixSortHistogram histogram;\n\n\t\t// Computing the histogram for the counting sort\n\t\tfor (int key_index = 0; key_index < number_of_keys; key_index++)\n\t\t{\n\t\t\tunsigned int radix = READ_KEY(key_index) & radix_extraction_mask;\n\t\t\tradix >>= digit;\n\n\t\t\thistogram.increment(radix, 1);\n\t\t}\n\n\t\t// Computing the prefix sum for stable counting sort\n\t\tfor (int i = 1; i < 1 << GMoNSortRadixSize; i++)\n\t\t{\n\t\t\tunsigned int histogram_i_minus_1_value = histogram.fetch_value(i - 1);\n\t\t\thistogram.increment(i, histogram_i_minus_1_value);\n\t\t}\n\n\t\t// Reordering\n\t\tfor (int key_index = number_of_keys - 1; key_index >= 0; key_index--)\n\t\t{\n\t\t\tunsigned int key = READ_KEY(key_index);\n\t\t\tunsigned int radix = key & radix_extraction_mask;\n\t\t\tradix >>= digit;\n\n\t\t\thistogram.decrement(radix, 1);\n\t\t\tunsigned int histogram_value = histogram.fetch_value(radix);\n\t\t\tSTORE_KEY(histogram_value, key);\n\n\t\t\t// Also sorting a list of indices so that, when returning from this function,\n\t\t\t// we can find from the caller's code which ColorRGB corresponds to the median\n\t\t\t//\n\t\t\t// Clearing the low 8 bits\n\t\t\tsorted_keys[SORTED_KEYS_INDEX(histogram_value)] &= ~0xFF;\n\t\t\t// Setting the sorted index in the low 8 bits\n\t\t\tsorted_keys[SORTED_KEYS_INDEX(histogram_value)] |= (sorted_keys[SORTED_KEYS_INDEX(key_index)] >> 8);\n\t\t}\n\n\t\t// Bookkeeping to prepare the next sorting pass: copying the sorted indices\n\t\t// (in the low 8 bits) to the high 8 bits\n\t\tfor (int i = 0; i < GMoNMSetsCount; i++)\n\t\t{\n\t\t\t// Clearing the high 8 bits\n\t\t\tsorted_keys[SORTED_KEYS_INDEX(i)] &= 
~(0xFF << 8);\n\t\t\t// Copying the low 8 bits to the high 8 bits\n\t\t\tsorted_keys[SORTED_KEYS_INDEX(i)] |= (sorted_keys[SORTED_KEYS_INDEX(i)] & 0xFF) << 8;\n\t\t}\n\n#ifdef __KERNELCC__\n\t\t// Swapping the buffer indices on the GPU\n\t\tinput_buffer_index = !input_buffer_index;\n#else\n\t\t// On the CPU, input/output ping-ponging is just a swap of pointer\n\t\tunsigned int* temp = keys;\n\t\tkeys = scratch_memory;\n\t\tscratch_memory = temp;\n#endif\n\t}\n\n#ifndef __KERNELCC__\n\t// The result is in keys for 32 digit keys\n\treturn std::make_pair<>(keys_vector, sorted_keys);\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/GMoN/GMoNMeansRadixSortHistogramDeclaration.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_GMON_RADIX_SORT_HISTROGRAM_DECLARATION_H\n#define DEVICE_GMON_RADIX_SORT_HISTROGRAM_DECLARATION_H\n\n#include \"HostDeviceCommon/KernelOptions/GMoNOptions.h\"\n\n// The maximum number of sets allowed is 31\n// This means that the values of the histogram will never go above 31\n// \n// 31 can be encoded with 5 bits\n// 1 unsigned int is 32 bits\n// \n// That makes 6 histogram bins of 5 bits per 32bits uint\n#define BITS_PER_HISTOGRAM_BIN 5\n#define MAX_BINS_PER_HISTOGRAM_UINT 6\n#define MAX_BINS_PER_HISTOGRAM_UINT_F 6.0f\n\n/**\n * We're using a class here to compute the histogram for two reasons:\n * \n *\t- Without this class, we would probably use an array unsigned int[HISTOGRAM_SIZE] but arrays like that\n *\t\tbehave very poorly with the HIP compiler so we're using simple unsigned int variables instead, not an array\n *\t\t(and that's why we have a big #if, #elif, #endif at the end of the structure to declare the histogram\n *\t\tvariables depending\ton how many we need)\n * \n *\t- We use this class to do some packing since we allow only a maximum of 31 sets, we can make some assumption\n *\t\tabout how many bits we need per histogram bins\n */\nstruct GMoNRadixSortHistogram\n{\n\t/**\n\t * This function adds 'value' to the correct histogram bin\n\t */\n\tHIPRT_HOST_DEVICE HIPRT_INLINE void increment(unsigned int index, unsigned int value)\n\t{\n\t\tunsigned int histogram_variable_index = static_cast<unsigned int>(index / MAX_BINS_PER_HISTOGRAM_UINT_F);\n\t\tunsigned int bin_index = index - histogram_variable_index * MAX_BINS_PER_HISTOGRAM_UINT;\n\n\t\tswitch (histogram_variable_index)\n\t\t{\n\t\tcase 0:\n\t\t\thistogram0 += value << (bin_index * BITS_PER_HISTOGRAM_BIN);\n\t\t\tbreak;\n\n#if GMoNSortRadixSize >= 4\n\t\tcase 1:\n\t\t\thistogram1 += value << (bin_index * 
BITS_PER_HISTOGRAM_BIN);\n\t\t\tbreak;\n\n\t\tcase 2:\n\t\t\thistogram2 += value << (bin_index * BITS_PER_HISTOGRAM_BIN);\n\t\t\tbreak;\n#endif\n\t\t}\n\t}\n\n\t/**\n\t * This function adds 'value' to the correct histogram bin\n\t */\n\tHIPRT_HOST_DEVICE HIPRT_INLINE void decrement(unsigned int index, unsigned int value)\n\t{\n\t\tunsigned int histogram_variable_index = static_cast<unsigned int>(index / MAX_BINS_PER_HISTOGRAM_UINT_F);\n\t\tunsigned int bin_index = index - histogram_variable_index * MAX_BINS_PER_HISTOGRAM_UINT;\n\n\t\t// Getting the current value of the bin\n\t\tunsigned int histogram_current_value = fetch_value(index);\n\n\t\t// Decrementing\n\t\thistogram_current_value -= value;\n\n\t\t// Clearing before setting\n\t\tclear_bin(index);\n\n\t\t// Adding (to the bin value that is now 0)\n\t\tincrement(index, histogram_current_value);\n\t}\n\n\t/**\n     * Returns\n     */\n\tHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int fetch_value(unsigned int index)\n\t{\n\t\tunsigned int histogram_variable_index = static_cast<unsigned int>(index / MAX_BINS_PER_HISTOGRAM_UINT_F);\n\t\tunsigned int bin_index = index - histogram_variable_index * MAX_BINS_PER_HISTOGRAM_UINT;\n\n\t\tswitch (histogram_variable_index)\n\t\t{\n\t\tcase 0:\n\t\t\treturn (histogram0 >> (bin_index * BITS_PER_HISTOGRAM_BIN)) & 31;\n\n#if GMoNSortRadixSize >= 4\n\t\tcase 1:\n\t\t\treturn (histogram1 >> (bin_index * BITS_PER_HISTOGRAM_BIN)) & 31;\n\n\t\tcase 2:\n\t\t\treturn (histogram2 >> (bin_index * BITS_PER_HISTOGRAM_BIN)) & 31;\n#endif\n\n\t\tdefault:\n\t\t\treturn -1;\n\t\t}\n\t}\n\n\tHIPRT_HOST_DEVICE HIPRT_INLINE void clear_bin(unsigned int index)\n\t{\n\t\tunsigned int histogram_variable_index = static_cast<unsigned int>(index / MAX_BINS_PER_HISTOGRAM_UINT_F);\n\t\tunsigned int bin_index = index - histogram_variable_index * MAX_BINS_PER_HISTOGRAM_UINT;\n\n\t\tswitch (histogram_variable_index)\n\t\t{\n\t\tcase 0:\n\t\t\thistogram0 &= ~(31 << (bin_index * 
BITS_PER_HISTOGRAM_BIN));\n\t\t\tbreak;\n\n#if GMoNSortRadixSize >= 4\n\t\tcase 1:\n\t\t\thistogram1 &= ~(31 << (bin_index * BITS_PER_HISTOGRAM_BIN));\n\t\t\tbreak;\n\n\t\tcase 2:\n\t\t\thistogram2 &= ~(31 << (bin_index * BITS_PER_HISTOGRAM_BIN));\n\t\t\tbreak;\n#endif\n\t\t}\n\t}\n\n#if GMoNSortRadixSize == 1\n\tunsigned int histogram0 = 0;\n#elif GMoNSortRadixSize == 2\n\tunsigned int histogram0 = 0;\n#elif GMoNSortRadixSize == 4\n\tunsigned int histogram0 = 0, histogram1 = 0, histogram2 = 0;\n#endif\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Hash.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_HASH_H\n#define DEVICE_HASH_H\n\n#include <hiprt/hiprt_device.h>\n\nHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n{\n    seed = (seed ^ 61) ^ (seed >> 16);\n    seed *= 9;\n    seed = seed ^ (seed >> 4);\n    seed *= 0x27d4eb2d;\n    seed = seed ^ (seed >> 15);\n    return seed;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/HashGrid.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n #ifndef DEVICE_INCLUDES_HASH_GRID_H\n #define DEVICE_INCLUDES_HASH_GRID_H\n\n #include \"HostDeviceCommon/KernelOptions/ReGIROptions.h\"\n#include \"HostDeviceCommon/Math.h\"\n\nstruct HashGrid\n{\n\tstatic constexpr unsigned int UNDEFINED_CHECKSUM_OR_GRID_INDEX = 0xFFFFFFFF;\n\n    /**\n\t * Returns true if the collision was resolved with success and the new hash\n\t * (or unchanged if there was no collision) is set in 'in_out_base_hash'\n\t * \n\t * Returns false if the given 'in_out_hash_cell_index' refers to a hash cell that hasn't been\n\t * allocated yet or if there was a collision but it couldn't be resolved and the collision resolution was\n\t * aborted because too many iterations\n\t */\n\ttemplate <int maxCollisionResolveSteps, bool isInsertion = false>\n\tHIPRT_DEVICE static bool resolve_collision(AtomicType<unsigned int>* checksum_buffer, unsigned int total_number_of_cells, unsigned int& in_out_hash_cell_index, unsigned int checksum, unsigned int opt_existing_checksum = HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t{\n\t\tunsigned int existing_checksum;\n\t\tif (opt_existing_checksum != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t// The current hash key was passed as an argument, no need to fetch from memory\n\t\t\texisting_checksum = opt_existing_checksum;\n\t\telse\n\t\t\texisting_checksum = checksum_buffer[in_out_hash_cell_index];\n\n\t\tif (existing_checksum == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t{\n\t\t\t// This is refering to a hash cell that hasn't been populated yet\n\n\t\t\tif (!isInsertion)\n\t\t\t{\n\t\t\t\t// If we're not inserting, this means that we're querrying an empty cell\n\t\t\t\treturn false;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t// This is refering to a hash cell that hasn't been populated yet and we're\n\t\t\t\t// inserting into it so we just found an empty cell 
first try\n\t\t\t\t// \n\t\t\t\t// Let's try to insert atomically into it\n\n\t\t\t\tunsigned int previous_checksum = hippt::atomic_compare_exchange(&checksum_buffer[in_out_hash_cell_index], HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX, checksum);\n\t\t\t\tif (previous_checksum == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t\t{\n\t\t\t\t\t// (and we made sure sure through an atomic CAS that someone else wasn't\n\t\t\t\t\t// also competing for that empty cell)\n\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse if (previous_checksum == checksum)\n\t\t\t\t{\n\t\t\t\t\t// Another thread just inserted the same hash key at the same time but this\n\t\t\t\t\t// current thread here wasn't fast enough on the atomic compare exchange above\n\t\t\t\t\t// so the key was already inserted.\n\n\t\t\t\t\t// This thread has nothing else to do.\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\t// Another hash key has been inserted in the same position, we're going to have to\n\t\t\t\t\t// probe for a good position\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif (existing_checksum != checksum)\n\t\t{\n\t\t\t// This is a collision\n\n\t\t\tunsigned int base_cell_index = in_out_hash_cell_index;\n\t\t\tunsigned int current_cell_index_collision_resolution = base_cell_index;\n\n\t\t\t// Collision resolution\n\t\t\tfor (int i = 1; i <= maxCollisionResolveSteps; i++)\n\t\t\t{\n\t\t\t\tcurrent_cell_index_collision_resolution = collision_resolution_next_cell_index<ReGIR_HashGridCollisionResolutionMode>(current_cell_index_collision_resolution, total_number_of_cells);\n\t\t\t\tif (current_cell_index_collision_resolution == base_cell_index)\n\t\t\t\t\t// We looped on the whole hash table. 
Couldn't find an empty cell\n\t\t\t\t\treturn false;\n\n\t\t\t\tunsigned int next_cell_checksum = checksum_buffer[current_cell_index_collision_resolution];\n\t\t\t\tif (next_cell_checksum == checksum)\n\t\t\t\t{\n\t\t\t\t\t// Stopping if we found our proper cell (with our hash).\n\t\t\t\t\t//\n\t\t\t\t\t// This means that we have resolved the collision \n\n\t\t\t\t\tin_out_hash_cell_index = current_cell_index_collision_resolution;\n\n\t\t\t\t\treturn true;\n\t\t\t\t}\n\t\t\t\telse if (next_cell_checksum == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t\t{\n\t\t\t\t\tif (isInsertion)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Stopping if we found an empty cell for insertion\n\n\t\t\t\t\t\tunsigned int previous_checksum = hippt::atomic_compare_exchange(&checksum_buffer[current_cell_index_collision_resolution], HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX, checksum);\n\t\t\t\t\t\tif (previous_checksum == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// (and we made sure sure through an atomic CAS that someone else wasn't\n\t\t\t\t\t\t\t// also competing for that empty cell)\n\n\t\t\t\t\t\t\tin_out_hash_cell_index = current_cell_index_collision_resolution;\n\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t\telse if (previous_checksum == checksum)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Another thread just inserted the same hash key at the same time but this\n\t\t\t\t\t\t\t// current thread here wasn't fast enough on the atomic compare exchange\n\t\t\t\t\t\t\t// above so the key was already inserted.\n\n\t\t\t\t\t\t\tin_out_hash_cell_index = current_cell_index_collision_resolution;\n\n\t\t\t\t\t\t\t// This thread has nothing else to do.\n\t\t\t\t\t\t\treturn true;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\t// This is a query but we've hit an empty cell during probing which means that we're querrying\n\t\t\t\t\t\t// a cell that has never been populated\n\n\t\t\t\t\t\treturn false;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// 
Linear probing couldn't find a valid position in the hash map\n\t\t\treturn false;\n\t\t}\n\t\telse\n\t\t\t// This is already our hash, no collision\n\t\t\treturn true;\n\t}\n\n\ttemplate <unsigned int collisionResolutionMode>\n\tHIPRT_DEVICE static unsigned int collision_resolution_next_cell_index(unsigned int current_cell_index_collision_resolution, unsigned int total_number_of_cells)\n\t{\n\t\tif constexpr (collisionResolutionMode == REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE_LINEAR_PROBING)\n\t\t{\n\t\t\treturn (current_cell_index_collision_resolution + 1) % total_number_of_cells;\n\t\t}\n\t\telse if constexpr(collisionResolutionMode == REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE_REHASHING)\n\t\t{\n\t\t\treturn wang_hash(current_cell_index_collision_resolution) % total_number_of_cells;\n\t\t}\n\t}\n};\n\n #endif\n"
  },
  {
    "path": "src/Device/includes/HashGridHash.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_HASH_GRID_HASH_H\n#define DEVICE_INCLUDES_HASH_GRID_HASH_H\n \n#include \"HostDeviceCommon/HIPRTCamera.h\"\n\n/**\n * PCG for the first hash function\n */\nHIPRT_DEVICE HIPRT_INLINE unsigned int h1_pcg(unsigned int seed)\n{\n    unsigned int state = seed * 747796405u + 2891336453u;\n    unsigned int word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;\n    \n    return (word >> 22u) ^ word;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int h1_pcg(float seed)\n{\n    return h1_pcg(hippt::float_as_uint(seed));\n}\n\n/**\n * xxhash32 for the second hash function\n */\nHIPRT_DEVICE HIPRT_INLINE unsigned int h2_xxhash32(unsigned int seed)\n{\n    constexpr unsigned int PRIME32_2 = 2246822519U;\n    constexpr unsigned int PRIME32_3 = 3266489917U;\n    constexpr unsigned int PRIME32_4 = 668265263U;\n    constexpr unsigned int PRIME32_5 = 374761393U;\n\n    unsigned int h32 = seed + PRIME32_5;\n\n    h32 = PRIME32_4 * ((h32 << 17) | (h32 >> (32 - 17)));\n    h32 = PRIME32_2 * (h32 ^ (h32 >> 15));\n    h32 = PRIME32_3 * (h32 ^ (h32 >> 13));\n\n    return h32^(h32 >> 16);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int h2_xxhash32(float seed)\n{\n    return h2_xxhash32(hippt::float_as_uint(seed));\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 hash_periodic_shifting(float3 base_position, float grid_cell_size)\n{\n    float scaling = 0.1f * grid_cell_size;\n\n    constexpr float frequency_per_grid_cell = 5.0f;\n    constexpr float frequency_per_grid_cell_inverse = 1.0f / frequency_per_grid_cell;\n    const float frequency = 1.0f / (grid_cell_size * frequency_per_grid_cell_inverse);\n\n    return make_float3(\n        base_position.x + (hippt::intrin_cosf(base_position.z * frequency) + hippt::intrin_cosf(base_position.y * frequency)) * scaling * 0.5f,\n        base_position.y + 
(hippt::intrin_cosf(base_position.x * frequency) + hippt::intrin_cosf(base_position.z * frequency)) * scaling * 0.5f,\n        base_position.z + (hippt::intrin_cosf(base_position.y * frequency) + hippt::intrin_cosf(base_position.x * frequency)) * scaling * 0.5f);\n}\n\n/**\n * The 'precision' factor controls the discretization of the normal. \n * Higher values mean more discretization steps mean more precision.\n * \n * 2 is a default good value for 'precision'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int hash_quantize_normal(float3 normal, unsigned int precision)\n{\n    float precision_f = precision;\n\n    unsigned int x = static_cast<unsigned int>(normal.x * precision_f) << (2 * precision);\n    unsigned int y = static_cast<unsigned int>(normal.y * precision_f) << (1 * precision);\n    unsigned int z = static_cast<unsigned int>(normal.z * precision_f);\n\n    return x | y | z;\n}\n\n /**\n * Reference: [WORLD-SPACE SPATIOTEMPORAL RESERVOIR REUSE FOR RAY-TRACED GLOBAL ILLUMINATION, Boisse, 2021]\n */\nHIPRT_DEVICE HIPRT_INLINE float compute_adaptive_cell_size(float3 world_position, const HIPRTCamera& current_camera, float target_projected_size, float grid_cell_min_size)\n{\n    int width = current_camera.sensor_width;\n    int height = current_camera.sensor_height;\n\n    float cell_size_step = hippt::length(world_position - current_camera.position) * tanf(target_projected_size * current_camera.vertical_fov * hippt::max(1.0f / height, (float)height / hippt::square(width)));\n    float log_step = floorf(log2f(cell_size_step / grid_cell_min_size));\n\n    return hippt::max(grid_cell_min_size, grid_cell_min_size * exp2f(log_step));\n}\n\n/**\n * Returns the hash cell index of the given world position and camera position. 
Does not resolve collisions.\n * The hash key for resolving collision is given in 'out_checksum'\n */\nHIPRT_DEVICE HIPRT_INLINE unsigned int hash_pos_distance_to_camera(unsigned int total_number_of_cells, float3 world_position, const HIPRTCamera& current_camera, float target_projected_size, float grid_cell_min_size, unsigned int& out_checksum)\n{\n    float cell_size = compute_adaptive_cell_size(world_position, current_camera, target_projected_size, grid_cell_min_size);\n\n    // Periodic shifting to avoid float precision issues when, for example, rays hit a surface\n    // that is perfectly at Y=0 (the floor of the scene for example).\n    // \n    // In that example, because of float imprecisions, rays hitting the floor will never have\n    // a y=0 hit coordinate but rather be slightly negative or slightly positive, depending\n    // on float imprecisions and this will actually create some noisy patterns where random rays access the hash \n    // grid cell that has Y-negative and some other randoms rays access the Y-positive hash grid cell\n    //\n    // Reference: SIGGRAPH 2022 - Advances in Spatial Hashing\n    world_position = hash_periodic_shifting(world_position, cell_size);\n\n    unsigned int grid_coord_x = static_cast<int>(floorf(world_position.x / cell_size));\n    unsigned int grid_coord_y = static_cast<int>(floorf(world_position.y / cell_size));\n    unsigned int grid_coord_z = static_cast<int>(floorf(world_position.z / cell_size));\n\n    // Using two hash functions as proposed in [WORLD-SPACE SPATIOTEMPORAL RESERVOIR REUSE FOR RAY-TRACED GLOBAL ILLUMINATION, Boisse, 2021]\n    out_checksum = h2_xxhash32(cell_size + h2_xxhash32(grid_coord_z + h2_xxhash32(grid_coord_y + h2_xxhash32(grid_coord_x))));\n    \n    unsigned int cell_hash = h1_pcg(cell_size + h1_pcg(grid_coord_z + h1_pcg(grid_coord_y + h1_pcg(grid_coord_x)))) % total_number_of_cells;\n\n    return cell_hash;\n}\n\nHIPRT_DEVICE HIPRT_INLINE unsigned int hash_double_position_camera(unsigned 
int total_number_of_cells, float3 world_position_1, float3 world_position_2, const HIPRTCamera& current_camera, float target_projected_size, float grid_cell_min_size, unsigned int& out_checksum)\n{\n    float cell_size_1 = compute_adaptive_cell_size(world_position_1, current_camera, target_projected_size, grid_cell_min_size);\n    float cell_size_2 = compute_adaptive_cell_size(world_position_2, current_camera, target_projected_size, grid_cell_min_size);\n\n    // Periodic shifting to avoid float precision issues when, for example, rays hit a surface\n    // that is perfectly at Y=0 (the floor of the scene for example).\n    // \n    // In that example, because of float imprecisions, rays hitting the floor will never have\n    // a y=0 hit coordinate but rather be slightly negative or slightly positive, depending\n    // on float imprecisions and this will actually create some noisy patterns where random rays access the hash \n    // grid cell that has Y-negative and some other randoms rays access the Y-positive hash grid cell\n    //\n    // Reference: SIGGRAPH 2022 - Advances in Spatial Hashing\n    world_position_1 = hash_periodic_shifting(world_position_1, cell_size_1);\n    world_position_2 = hash_periodic_shifting(world_position_2, cell_size_2);\n\n    unsigned int grid_coord_x_1 = static_cast<int>(floorf(world_position_1.x / cell_size_1));\n    unsigned int grid_coord_y_1 = static_cast<int>(floorf(world_position_1.y / cell_size_1));\n    unsigned int grid_coord_z_1 = static_cast<int>(floorf(world_position_1.z / cell_size_1));\n\n    unsigned int grid_coord_x_2 = static_cast<int>(floorf(world_position_2.x / cell_size_2));\n    unsigned int grid_coord_y_2 = static_cast<int>(floorf(world_position_2.y / cell_size_2));\n    unsigned int grid_coord_z_2 = static_cast<int>(floorf(world_position_2.z / cell_size_2));\n\n    // Using two hash functions as proposed in [WORLD-SPACE SPATIOTEMPORAL RESERVOIR REUSE FOR RAY-TRACED GLOBAL ILLUMINATION, Boisse, 2021]\n    
unsigned int hash_1 = h2_xxhash32(cell_size_1 + h2_xxhash32(grid_coord_z_1 + h2_xxhash32(grid_coord_y_1 + h2_xxhash32(grid_coord_x_1))));\n    unsigned int hash_2 = h2_xxhash32(cell_size_2 + h2_xxhash32(grid_coord_z_2 + h2_xxhash32(grid_coord_y_2 + h2_xxhash32(grid_coord_x_2))));\n    out_checksum = h2_xxhash32(hash_1 ^ hash_2);\n    \n    unsigned int cell_hash_1 = h1_pcg(cell_size_1 + h1_pcg(grid_coord_z_1 + h1_pcg(grid_coord_y_1 + h1_pcg(grid_coord_x_1))));\n    unsigned int cell_hash_2 = h1_pcg(cell_size_2 + h1_pcg(grid_coord_z_2 + h1_pcg(grid_coord_y_2 + h1_pcg(grid_coord_x_2))));\n    unsigned int cell_hash = h1_pcg(cell_hash_1 ^ cell_hash_2) % total_number_of_cells;\n\n    return cell_hash;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Intersect.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INTERSECT_H\n#define DEVICE_INTERSECT_H\n\n#include \"Device/includes/Dispersion.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Material.h\"\n#include \"Device/includes/ONB.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/Texture.h\"\n#include \"Device/includes/TriangleStructures.h\"\n#include \"Device/functions/FilterFunction.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Math.h\"\n\n#if SharedStackBVHTraversalSize > 0\n// This if is necessary to avoid declaring 0 size arrays if the\n// shared stack traversal sizes are 0\n__shared__ static int shared_stack_cache[SharedStackBVHTraversalSize * KernelWorkgroupThreadCount];\n#endif\n\n//#define __KERNELCC__\n\n#ifdef __KERNELCC__\n\n#if SharedStackBVHTraversalSize > 0\n#define DECLARE_SHARED_STACK_BUFFER shared_stack_buffer{ SharedStackBVHTraversalSize, shared_stack_cache }\n#else\n#define DECLARE_SHARED_STACK_BUFFER shared_stack_buffer{ 0, nullptr }\n#endif\n\n#if UseSharedStackBVHTraversal == KERNEL_OPTION_TRUE\n#define CONSTRUCT_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom) hiprtGeomTraversalClosestCustomStack<hiprtGlobalStack> traversal_variable_name(GPU_BVH_hiprtGeom, ray, global_stack, hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0)\n#define CONSTRUCT_HIPRT_ANY_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom) hiprtGeomTraversalAnyHitCustomStack<hiprtGlobalStack> traversal_variable_name(GPU_BVH_hiprtGeom, ray, global_stack, hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0)\n#else\n#define CONSTRUCT_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom) hiprtGeomTraversalClosest traversal_variable_name(GPU_BVH_hiprtGeom, ray, 
hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0);\n#define CONSTRUCT_HIPRT_ANY_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom) hiprtGeomTraversalAnyHit traversal_variable_name(GPU_BVH_hiprtGeom, ray, hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0);\n#endif\n\n#define DECLARE_HIPRT_CLOSEST_ANY_HIT_COMMON(render_data, GPU_BVH_hiprtGeom, ray, last_hit_primitive_index, random_number_generator)   \\\n  /* Payload for the alpha testing filter function */                                                               \\\n  FilterFunctionPayload payload;                                                                                    \\\n  payload.render_data = &render_data;                                                                               \\\n  payload.random_number_generator = &random_number_generator;                                                       \\\n  /* Filling the payload with the last hit primitive index to avoid self intersections */                           \\\n  /* (avoid that the ray intersects the triangle it is currently sitting on) */                                     \\\n  payload.last_hit_primitive_index = last_hit_primitive_index;                                                      \\\n  payload.simplified_light_ray = GPU_BVH_hiprtGeom == render_data.light_GPU_BVH;                                    \\\n  payload.bounce = bounce;                                                                                          \\\n                                                                                                                    \\\n  hiprtSharedStackBuffer DECLARE_SHARED_STACK_BUFFER;                                                               \\\n  hiprtGlobalStack global_stack(render_data.global_traversal_stack_buffer, shared_stack_buffer);\n\n\n\n\n#define DECLARE_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal_variable_name, render_data, GPU_BVH_hiprtGeom, ray, 
last_hit_primitive_index, random_number_generator) \\\n  DECLARE_HIPRT_CLOSEST_ANY_HIT_COMMON(render_data, GPU_BVH_hiprtGeom, ray, last_hit_primitive_index, random_number_generator);                              \\\n  CONSTRUCT_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom);\n\n#define DECLARE_HIPRT_ANY_HIT_TRAVERSAL(traversal_variable_name, render_data, GPU_BVH_hiprtGeom, ray, last_hit_primitive_index, random_number_generator) \\\n  DECLARE_HIPRT_CLOSEST_ANY_HIT_COMMON(render_data, GPU_BVH_hiprtGeom, ray, last_hit_primitive_index, random_number_generator);                          \\\n  CONSTRUCT_HIPRT_ANY_HIT_TRAVERSAL(traversal_variable_name, GPU_BVH_hiprtGeom);\n\n#endif\n\n/* References:\n * \n * [1] [Foundations of Game Engine Development: Rendering - Tangent/Bitangent calculation] http://foundationsofgameenginedev.com/#fged2\n */\nHIPRT_DEVICE HIPRT_INLINE float3 normal_mapping(const HIPRTRenderData& render_data, int normal_map_texture_index, TriangleIndices triangle_vertex_indices, TriangleTexcoords& texcoords, const float2& interpolated_texcoords, const float3& surface_normal)\n{\n    // Calculating tangents and bitangents aligned with texture U and V coordinates\n    float2 P0_texcoords = texcoords.x;\n    float2 P1_texcoords = texcoords.y;\n    float2 P2_texcoords = texcoords.z;\n\n    float2 delta_P1P0_texcoords = P1_texcoords - P0_texcoords;\n    float2 delta_P2P0_texcoords = P2_texcoords - P0_texcoords;\n\n    float3 P0 = render_data.buffers.vertices_positions[triangle_vertex_indices.x];\n    float3 P1 = render_data.buffers.vertices_positions[triangle_vertex_indices.y];\n    float3 P2 = render_data.buffers.vertices_positions[triangle_vertex_indices.z];\n\n    float3 edge_P0P1 = P1 - P0;\n    float3 edge_P0P2 = P2 - P0;\n\n    // To counter degenerate UVs\n    constexpr float det_bias = 1.0e-6f;\n    float det = delta_P1P0_texcoords.x * delta_P2P0_texcoords.y - delta_P1P0_texcoords.y * delta_P2P0_texcoords.x + det_bias;\n    // 
Check if the det isn't too low to avoid degenerate geometries that can then cause NaNs\n    float det_inverse = 1.0f / det;\n    float3 T = (edge_P0P1 * delta_P2P0_texcoords.y - edge_P0P2 * delta_P1P0_texcoords.y) * det_inverse;\n    float3 B = (edge_P0P2 * delta_P1P0_texcoords.x - edge_P0P1 * delta_P2P0_texcoords.x) * det_inverse;\n    if (hippt::length2(T) < 1.0e-6f || hippt::length2(B) < 1.0e-6f)\n        // The tangent or the bitangent is degenerate\n        return surface_normal;\n\n    ColorRGB32F normal = sample_texture_rgb_8bits(render_data.buffers.material_textures, normal_map_texture_index, /* is_srgb */ false, interpolated_texcoords);\n    // Bringing the normal in [-x, x]. x doesn't really matter since we normalize the result anyway\n    normal -= ColorRGB32F(0.5f);\n\n    float3 normal_tangent_space = hippt::normalize(make_float3(normal.r, normal.g, normal.b));\n\n    return local_to_world_frame(hippt::normalize(T), hippt::normalize(B), surface_normal, normal_tangent_space);\n}\n\nHIPRT_DEVICE HIPRT_INLINE float3 get_shading_normal(const HIPRTRenderData& render_data, const float3& geometric_normal, TriangleIndices triangle_vertex_indices, TriangleTexcoords triangle_texcoords, int primitive_index, const float2& uv, const float2& interpolated_texcoords)\n{\n    if (!render_data.render_settings.do_normal_mapping)\n        return geometric_normal;\n\n    // Do smooth shading first if we have vertex normals\n    float3 surface_normal;\n    if (render_data.buffers.has_vertex_normals[triangle_vertex_indices.x])\n        // Smooth normal available for the triangle\n        surface_normal = hippt::normalize(uv_interpolate(triangle_vertex_indices, render_data.buffers.vertex_normals, uv));\n    else\n        surface_normal = geometric_normal;\n\n    // Do normal mapping if we have a normal map\n    int material_index = render_data.buffers.material_indices[primitive_index];\n    unsigned short int normal_map_texture_index = 
render_data.buffers.materials_buffer.get_normal_map_texture_index(material_index);\n    if (normal_map_texture_index != MaterialConstants::NO_TEXTURE)\n        surface_normal = normal_mapping(render_data, normal_map_texture_index, triangle_vertex_indices, triangle_texcoords, interpolated_texcoords, surface_normal);\n\n    return surface_normal;\n}\n\n/**\n * Flips the surface normals if necessary such that they are facing us. \n * \n * The normals are only flipped if some conditions are met, read the \n * comment in the function for more details\n */\nHIPRT_DEVICE HIPRT_INLINE void fix_backfacing_normals(HitInfo& hit_info, const float3& view_direction)\n{\n    if (hippt::dot(view_direction, hit_info.geometric_normal) < 0.0f)\n    {\n        // The geometry isn't front-facing\n\n        hit_info.geometric_normal *= -1.0f;\n        hit_info.shading_normal *= -1.0f;\n    }\n\n    if (hippt::dot(view_direction, hit_info.shading_normal) < 0.0f)\n        // Flipping the normal such that the view direction isn't below the shading hemisphere anymore\n        hit_info.shading_normal *= -1.0f;\n\n    // Now ensuring that a perfectly reflected direction (about the shading normal) doesn't go below the *geometric* surface\n    float3 perfect_reflected_direction = reflect_ray(view_direction, hit_info.shading_normal);\n    if (hippt::dot(perfect_reflected_direction, hit_info.geometric_normal) <= 0.0f)\n    {\n        // The perfectly reflected direction *is* below the geometric normal,\n        // we're going to pull the shading normal towards the geometric normal such that\n        // the perfectly reflected direction now is just an epsilon above the surface\n        //\n        // This is done by first computing a new reflected direction that is just above the surface\n        // and then recomputing the new shading normal as the half vector between the new reflect direction\n        // and the view direction\n\n        constexpr float epsilon = 0.01;\n\n        
perfect_reflected_direction -= hippt::normalize((hippt::dot(perfect_reflected_direction, hit_info.geometric_normal) - epsilon) * hit_info.geometric_normal);\n\n        // The new shading normal is the half vector between the pulled up reflected direction\n        // and the view direction\n        hit_info.shading_normal = hippt::normalize(view_direction + perfect_reflected_direction);\n    }\n}\n\n#ifndef __KERNELCC__\n#include \"Renderer/BVH.h\"\nHIPRT_DEVICE HIPRT_INLINE hiprtHit intersect_scene_cpu(const HIPRTRenderData& render_data, BVH* bvh, const hiprtRay& ray, int last_hit_primitive_index, Xorshift32Generator& random_number_generator)\n{\n    FilterFunctionPayload filter_function_payload;\n    filter_function_payload.simplified_light_ray = bvh == render_data.cpu_only.light_bvh;\n    filter_function_payload.render_data = &render_data;\n    filter_function_payload.random_number_generator = &random_number_generator;\n    // Filling the payload with the last hit primitive index to avoid self intersections\n    // (avoid that the ray intersects the triangle it is currently sitting on)\n    filter_function_payload.last_hit_primitive_index = last_hit_primitive_index;\n\n    hiprtHit hiprtHit;\n    bvh->intersect(ray, hiprtHit, &filter_function_payload);\n\n    return hiprtHit;\n}\n#endif\n\n/**\n * Returns true if a hit was found, false otherwise\n */\nHIPRT_DEVICE HIPRT_INLINE bool trace_main_path_ray(const HIPRTRenderData& render_data, hiprtRay ray, RayPayload& in_out_ray_payload, HitInfo& out_hit_info, int last_hit_primitive_index, int bounce, Xorshift32Generator& random_number_generator)\n{\n#ifdef __KERNELCC__\n    if (render_data.GPU_BVH == nullptr)\n        // Empty scene --> no intersection\n        return false;\n#endif\n\n    hiprtHit hit;\n    bool skipping_volume_boundary = false;\n    do\n    {\n#ifdef __KERNELCC__\n        DECLARE_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal, render_data, render_data.GPU_BVH, ray, last_hit_primitive_index, 
random_number_generator);\n        \n        hit = traversal.getNextHit();\n#else\n        hit = intersect_scene_cpu(render_data, render_data.cpu_only.bvh, ray, last_hit_primitive_index, random_number_generator);\n#endif\n\n        if (!hit.hasHit())\n            return false;\n\n        TriangleIndices triangle_vertex_indices = load_triangle_vertex_indices(render_data.buffers.triangles_indices, hit.primID);\n        TriangleTexcoords triangle_texcoords = load_triangle_texcoords(render_data.buffers.texcoords, triangle_vertex_indices);\n\n        out_hit_info.inter_point = ray.origin + hit.t * ray.direction;\n        out_hit_info.primitive_index = hit.primID;\n        out_hit_info.texcoords = uv_interpolate(triangle_texcoords, hit.uv);\n        // TODO hit.normal is in object space, this simple approach will not work if using\n        // multiple-levels BVH (TLAS/BLAS). We'll have to  transform by the BLAS transform\n        out_hit_info.geometric_normal = hippt::normalize(hit.normal);\n        out_hit_info.shading_normal = get_shading_normal(render_data, out_hit_info.geometric_normal, triangle_vertex_indices, triangle_texcoords, hit.primID, hit.uv, out_hit_info.texcoords);\n        out_hit_info.t = hit.t;\n\n        int material_index = render_data.buffers.material_indices[hit.primID];\n        in_out_ray_payload.material = get_intersection_material(render_data, material_index, out_hit_info.texcoords);\n\n        skipping_volume_boundary = in_out_ray_payload.volume_state.interior_stack.push(\n            in_out_ray_payload.volume_state.incident_mat_index, in_out_ray_payload.volume_state.outgoing_mat_index, in_out_ray_payload.volume_state.inside_material, material_index, in_out_ray_payload.material.get_dielectric_priority());\n\n        if (in_out_ray_payload.volume_state.inside_material)\n            // If we're traveling inside a volume, accumulating the distance for Beer's law\n            in_out_ray_payload.volume_state.distance_in_volume += hit.t;\n\n        
fix_backfacing_normals(out_hit_info, -ray.direction);\n\n        if (skipping_volume_boundary)\n        {\n            // If we're skipping, the boundary, the ray just keeps going on its way\n            ray.origin = out_hit_info.inter_point;\n\n            // Don't forget to increment the distance traveled\n            // TODO: Are we not double counting the distance here and a few lines above (where we set the .t, .uv, .geometric_normal, ...)\n            in_out_ray_payload.volume_state.distance_in_volume += hit.t;\n        }\n\n    } while ((skipping_volume_boundary && hit.hasHit()));\n\n    if (in_out_ray_payload.material.dispersion_scale > 0.0f && in_out_ray_payload.material.specular_transmission > 0.0f && in_out_ray_payload.volume_state.sampled_wavelength == 0.0f)\n        // If we hit a dispersive material, we sample the wavelength that will be used\n        // for computing the wavelength dependent IORs used for dispersion\n        //\n        // We're also not re-doing the sampling if a wavelength has already been sampled for that path\n        //\n        // Negating the wavelength to indicate that the throughput filter of the wavelength\n        // hasn't been applied yet (applied in principled_glass_eval())\n        in_out_ray_payload.volume_state.sampled_wavelength = -sample_wavelength_uniformly(random_number_generator);\n\n    return hit.hasHit();\n}\n\n/**\n * Returns true if in shadow (a hit was found before 't_max' distance)\n * Returns false if unoccluded\n */\nHIPRT_DEVICE HIPRT_INLINE bool evaluate_shadow_ray_occluded(const HIPRTRenderData& render_data, hiprtRay ray, float t_max, int last_hit_primitive_index, int bounce, Xorshift32Generator& random_number_generator)\n{\n#ifdef __KERNELCC__\n    if (render_data.GPU_BVH == nullptr)\n        // Empty scene --> no intersection\n        return false;\n#endif\n\n#ifdef __KERNELCC__\n    ray.maxT = t_max - 1.0e-4f;\n\n    DECLARE_HIPRT_ANY_HIT_TRAVERSAL(traversal, render_data, render_data.GPU_BVH, ray, 
last_hit_primitive_index, random_number_generator);\n\n    hiprtHit shadow_ray_hit = traversal.getNextHit();\n    if (!shadow_ray_hit.hasHit())\n        return false;\n\n    return true;\n#else\n    float alpha = 1.0f;\n    // The total distance of our ray. Incremented after each hit\n    // (we may find multiple hits if we hit transparent texture\n    // and keep intersecting the scene)\n    float cumulative_t = 0.0f;\n\n    hiprtHit hit;\n    do\n    {\n        // We should use ray tracing filter functions here instead of re-tracing new rays\n        hit = intersect_scene_cpu(render_data, render_data.cpu_only.bvh, ray, last_hit_primitive_index, random_number_generator);\n        if (!hit.hasHit())\n            return false;\n\n        if (render_data.render_settings.do_alpha_testing)\n            alpha = get_hit_base_color_alpha(render_data, hit);\n        else\n            alpha = 1.0f;\n\n        // Next ray origin\n        ray.origin = ray.origin + ray.direction * hit.t;\n        cumulative_t += hit.t;\n\n        // We keep going as long as the alpha is < 1.0f meaning that we hit texture transparency\n    } while (alpha < 1.0f && cumulative_t < t_max - 1.0e-4f);\n\n    // If we found a hit and that it is close enough\n    return hit.hasHit() && cumulative_t < t_max - 1.0e-4f;\n#endif // __KERNELCC__\n}\n\n/**\n * Returns true if in shadow (a hit was found before 't_max' distance\n * Returns false if unoccluded\n * \n * This function also uses NEE++ if enabled in the kernel options and this\n * function can update the visibility map of NEE++ if enabled in 'render_data.nee_plus_plus'\n */\nHIPRT_DEVICE HIPRT_INLINE bool evaluate_shadow_ray_nee_plus_plus(HIPRTRenderData& render_data, hiprtRay ray, float t_max, int last_hit_primitive_index, NEEPlusPlusContext& nee_plus_plus_context, Xorshift32Generator& random_number_generator, int bounce)\n{\n#if DirectLightUseNEEPlusPlusRR == KERNEL_OPTION_TRUE && DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE\n    bool 
shadow_ray_discarded = false;\n    bool shadow_ray_occluded = false;\n\n    if (render_data.nee_plus_plus.do_update_shadow_rays_traced_statistics)\n        // Updating the statistics\n        hippt::atomic_fetch_add(render_data.nee_plus_plus.total_shadow_ray_queries, 1ull);\n\n    bool nee_plus_plus_envmap_rr_disabled = nee_plus_plus_context.envmap && !render_data.nee_plus_plus.m_enable_nee_plus_plus_RR_for_envmap;\n    bool nee_plus_plus_emissives_rr_disabled = !nee_plus_plus_context.envmap && !render_data.nee_plus_plus.m_enable_nee_plus_plus_RR_for_emissives;\n    if (nee_plus_plus_envmap_rr_disabled || nee_plus_plus_emissives_rr_disabled)\n    {\n        // This is NEE++ RR for envmap sampling but envmap NEE++ RR is disabled\n        nee_plus_plus_context.unoccluded_probability = 1.0f;\n\n        if (render_data.nee_plus_plus.do_update_shadow_rays_traced_statistics)\n            // Updating the statistics\n            hippt::atomic_fetch_add(render_data.nee_plus_plus.shadow_rays_actually_traced, 1ull);\n\n        shadow_ray_occluded = evaluate_shadow_ray_occluded(render_data, ray, t_max, last_hit_primitive_index, bounce, random_number_generator);\n        shadow_ray_discarded = false;\n    }\n\n    // Getting the matrix index from 'estimate_visibility_probability' in case we need to accumulate\n    // visibility in the visibility map with 'accumulate_visibility'. 
If we do need to do that,\n    // then that matrix index can be reused instead of being recomputed automatically by 'accumulate_visibility'\n    // to save a little bit of computations\n    unsigned int seed_before = random_number_generator.m_state.seed;\n\n    unsigned int nee_plus_plus_hash_grid_cell_index;\n    float visible_probability = nee_plus_plus_context.unoccluded_probability = render_data.nee_plus_plus.estimate_visibility_probability(nee_plus_plus_context, render_data.current_camera, nee_plus_plus_hash_grid_cell_index);\n    bool likely_visible = random_number_generator() < visible_probability;\n\n    if (likely_visible)\n    {\n        if (render_data.nee_plus_plus.do_update_shadow_rays_traced_statistics)\n            // Updating the statistics\n            hippt::atomic_fetch_add(render_data.nee_plus_plus.shadow_rays_actually_traced, 1ull);\n\n        // The shadow ray is likely visible, testing with a shadow ray\n        shadow_ray_occluded = evaluate_shadow_ray_occluded(render_data, ray, t_max, last_hit_primitive_index, bounce, random_number_generator);\n        shadow_ray_discarded = false;\n\n        if (render_data.nee_plus_plus.m_update_visibility_map)\n            render_data.nee_plus_plus.accumulate_visibility(!shadow_ray_occluded, nee_plus_plus_hash_grid_cell_index);\n    }\n    else\n    {\n        shadow_ray_discarded = true;\n\n        // NEE++ tells us that these two points are going to be occluded so we're not testing\n        // the shadow ray and assuming occluded instead\n        shadow_ray_occluded = true;\n    }\n#else\n    // Setting this to 1.0f if not using NEE++ so that is has no effect when the caller\n    // divides by it\n    nee_plus_plus_context.unoccluded_probability = 1.0f;\n\n    bool shadow_ray_occluded = evaluate_shadow_ray_occluded(render_data, ray, t_max, last_hit_primitive_index, bounce, random_number_generator);\n\n    // We may still want to update the visibility map\n    if 
(render_data.nee_plus_plus.m_update_visibility_map && DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE)\n    {\n        unsigned int nee_plus_plus_hash_grid_cell_index = render_data.nee_plus_plus.get_visibility_map_index<true>(nee_plus_plus_context, render_data.current_camera);\n        \n        render_data.nee_plus_plus.accumulate_visibility(!shadow_ray_occluded, nee_plus_plus_hash_grid_cell_index);\n    }\n#endif\n\n#if DirectLightNEEPlusPlusDisplayShadowRaysDiscarded == KERNEL_OPTION_TRUE\n    uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n\n    uint32_t seed = blockIdx.x + blockIdx.y * gridDim.x + 1 + (threadIdx.y >= 4) * 1;\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    Xorshift32Generator color_random(wang_hash(seed));\n\n    ColorRGB32F block_color = ColorRGB32F(color_random(), color_random(), color_random()) * (render_data.render_settings.sample_number + 1);\n\n    if (bounce == DirectLightNEEPlusPlusDisplayShadowRaysDiscardedBounce)\n    {\n        if (shadow_ray_discarded)\n            render_data.buffers.accumulated_ray_colors[pixel_index] = ColorRGB32F();\n        else\n            render_data.buffers.accumulated_ray_colors[pixel_index] = block_color;\n    }\n#endif\n\n    return shadow_ray_occluded;\n}\n\n/**\n * This function shoots a BSDF ray in the BVH containing only the emissive triangles of the scene\n * This may be useful in some algorithms for increased performance\n * \n * Returns true if a hit was found, false otherwise\n */\nHIPRT_DEVICE HIPRT_INLINE bool evaluate_bsdf_light_sample_ray_simplified(const HIPRTRenderData& render_data, hiprtRay ray, float t_max, BSDFLightSampleRayHitInfo& out_light_hit_info, int last_hit_primitive_index, int bounce, Xorshift32Generator& random_number_generator)\n{\n#ifdef __KERNELCC__\n    if (render_data.light_GPU_BVH == nullptr)\n        // Empty scene --> no intersection\n        return 
false;\n#endif\n\n#ifdef __KERNELCC__\n    ray.maxT = t_max - 1.0e-4f;\n\n    DECLARE_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal, render_data, render_data.light_GPU_BVH, ray, last_hit_primitive_index, random_number_generator);\n\n    hiprtHit shadow_ray_hit = traversal.getNextHit();\n    if (!shadow_ray_hit.hasHit())\n        return false;\n\n    // If we're here, this means that we found a hit that is not\n    // alpha-transparent with a distance < t_max so that's a hit and we're shadowed.\n\n    // Reading the emission of the material\n    int global_triangle_index = render_data.buffers.emissive_triangles_primitive_indices_and_emissive_textures[shadow_ray_hit.primID];\n    int material_index = render_data.buffers.material_indices[global_triangle_index];\n    int emission_texture_index = render_data.buffers.materials_buffer.get_emission_texture_index(material_index);\n\n    TriangleIndices triangle_vertex_indices = load_triangle_vertex_indices(render_data.buffers.triangles_indices, global_triangle_index);\n    TriangleTexcoords triangle_texcoords = load_triangle_texcoords(render_data.buffers.texcoords, triangle_vertex_indices);\n    float2 interpolated_texcoords = uv_interpolate(triangle_texcoords, shadow_ray_hit.uv);\n\n    if (emission_texture_index != MaterialConstants::NO_TEXTURE)\n        out_light_hit_info.hit_emission = get_material_property<ColorRGB32F>(render_data, false, interpolated_texcoords, emission_texture_index);\n        // Getting the shading normal\n    else\n        out_light_hit_info.hit_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n\n    out_light_hit_info.hit_interpolated_texcoords = interpolated_texcoords;\n    out_light_hit_info.hit_shading_normal = get_shading_normal(render_data, hippt::normalize(shadow_ray_hit.normal), triangle_vertex_indices, triangle_texcoords, global_triangle_index, shadow_ray_hit.uv, interpolated_texcoords);\n    out_light_hit_info.hit_geometric_normal = 
hippt::normalize(shadow_ray_hit.normal);\n    out_light_hit_info.hit_prim_index = global_triangle_index;\n    out_light_hit_info.hit_material_index = material_index;\n    out_light_hit_info.hit_distance = shadow_ray_hit.t;\n\n    return true;\n#else\n    float alpha = 1.0f;\n    // The total distance of our ray. Incremented after each hit\n    // (we may find multiple hits if we hit transparent texture\n    // and keep intersecting the scene)\n    float cumulative_t = 0.0f;\n\n    int global_triangle_index_hit;\n    hiprtHit shadow_ray_hit;\n    do\n    {\n        // We should use ray tracing filter functions here instead of re-tracing new rays\n        shadow_ray_hit = intersect_scene_cpu(render_data, render_data.cpu_only.light_bvh, ray, last_hit_primitive_index, random_number_generator);\n        if (!shadow_ray_hit.hasHit())\n            return false;\n\n        global_triangle_index_hit = render_data.buffers.emissive_triangles_primitive_indices_and_emissive_textures[shadow_ray_hit.primID];\n\n        if (render_data.render_settings.do_alpha_testing)\n            alpha = get_hit_base_color_alpha(render_data, global_triangle_index_hit, shadow_ray_hit.uv);\n        else\n            alpha = 1.0f;\n\n        // Next ray origin\n        ray.origin = ray.origin + ray.direction * shadow_ray_hit.t;\n        cumulative_t += shadow_ray_hit.t;\n\n        // We keep going as long as the alpha is < 1.0f meaning that we hit texture transparency\n    } while (alpha < 1.0f && cumulative_t < t_max - 1.0e-4f);\n\n    bool hit_found = shadow_ray_hit.hasHit() && cumulative_t < t_max - 1.0e-4f;\n\n    if (hit_found)\n    {\n        // If we found a hit and that it is close enough (hit_found conditions)\n\n        int material_index = render_data.buffers.material_indices[global_triangle_index_hit];\n        int emission_texture_index = render_data.buffers.materials_buffer.get_emission_texture_index(material_index);\n\n        TriangleIndices triangle_vertex_indices = 
load_triangle_vertex_indices(render_data.buffers.triangles_indices, global_triangle_index_hit);\n        TriangleTexcoords triangle_texcoords = load_triangle_texcoords(render_data.buffers.texcoords, triangle_vertex_indices);\n        float2 interpolated_texcoords = uv_interpolate(triangle_texcoords, shadow_ray_hit.uv);\n\n        if (emission_texture_index != MaterialConstants::NO_TEXTURE)\n            out_light_hit_info.hit_emission = get_material_property<ColorRGB32F>(render_data, false, interpolated_texcoords, emission_texture_index);\n        else\n            out_light_hit_info.hit_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n\n        out_light_hit_info.hit_interpolated_texcoords = interpolated_texcoords;\n        out_light_hit_info.hit_shading_normal = get_shading_normal(render_data, hippt::normalize(shadow_ray_hit.normal), triangle_vertex_indices, triangle_texcoords, global_triangle_index_hit, shadow_ray_hit.uv, interpolated_texcoords);\n        out_light_hit_info.hit_geometric_normal = hippt::normalize(shadow_ray_hit.normal);\n        out_light_hit_info.hit_prim_index = global_triangle_index_hit;\n        out_light_hit_info.hit_material_index = material_index;\n        out_light_hit_info.hit_distance = cumulative_t;\n\n        return true;\n    }\n    else\n        return false;\n#endif // __KERNELCC__\n}\n\n/**\n * Returns true if in shadow, false otherwise.\n * \n * Also, if a hit was found, outputs the emission of the material at the hit point in 'out_hit_emission'\n */\nHIPRT_DEVICE HIPRT_INLINE bool evaluate_bsdf_light_sample_ray(const HIPRTRenderData& render_data, hiprtRay ray, float t_max, BSDFLightSampleRayHitInfo& out_light_hit_info, int last_hit_primitive_index, int bounce, Xorshift32Generator& random_number_generator)\n{\n#ifdef __KERNELCC__\n    if (render_data.GPU_BVH == nullptr)\n        // Empty scene --> no intersection\n        return false;\n#endif\n\n#ifdef __KERNELCC__\n    ray.maxT = t_max - 
1.0e-4f;\n\n    DECLARE_HIPRT_CLOSEST_HIT_TRAVERSAL(traversal, render_data, render_data.GPU_BVH, ray, last_hit_primitive_index, random_number_generator);\n\n    hiprtHit shadow_ray_hit = traversal.getNextHit();\n    if (!shadow_ray_hit.hasHit())\n        return false;\n\n    // If we're here, this means that we found a hit that is not\n    // alpha-transparent with a distance < t_max so that's a hit and we're shadowed.\n\n    // Reading the emission of the material\n    int material_index = render_data.buffers.material_indices[shadow_ray_hit.primID];\n    int emission_texture_index = render_data.buffers.materials_buffer.get_emission_texture_index(material_index);\n\n    TriangleIndices triangle_vertex_indices = load_triangle_vertex_indices(render_data.buffers.triangles_indices, shadow_ray_hit.primID);\n    TriangleTexcoords triangle_texcoords = load_triangle_texcoords(render_data.buffers.texcoords, triangle_vertex_indices);\n    float2 interpolated_texcoords = uv_interpolate(triangle_texcoords, shadow_ray_hit.uv);\n\n    if (emission_texture_index != MaterialConstants::NO_TEXTURE)\n        out_light_hit_info.hit_emission = get_material_property<ColorRGB32F>(render_data, false, interpolated_texcoords, emission_texture_index);\n        // Getting the shading normal\n    else\n        out_light_hit_info.hit_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n\n    out_light_hit_info.hit_interpolated_texcoords = interpolated_texcoords;\n    out_light_hit_info.hit_shading_normal = get_shading_normal(render_data, hippt::normalize(shadow_ray_hit.normal), triangle_vertex_indices, triangle_texcoords, shadow_ray_hit.primID, shadow_ray_hit.uv, interpolated_texcoords);\n    out_light_hit_info.hit_geometric_normal = hippt::normalize(shadow_ray_hit.normal);\n    out_light_hit_info.hit_prim_index = shadow_ray_hit.primID;\n    out_light_hit_info.hit_material_index = material_index;\n    out_light_hit_info.hit_distance = shadow_ray_hit.t;\n\n    return 
true;\n#else\n    float alpha = 1.0f;\n    // The total distance of our ray. Incremented after each hit\n    // (we may find multiple hits if we hit transparent texture\n    // and keep intersecting the scene)\n    float cumulative_t = 0.0f;\n\n    hiprtHit shadow_ray_hit;\n    do\n    {\n        // We should use ray tracing filter functions here instead of re-tracing new rays\n        shadow_ray_hit = intersect_scene_cpu(render_data, render_data.cpu_only.bvh, ray, last_hit_primitive_index, random_number_generator);\n        if (!shadow_ray_hit.hasHit())\n            return false;\n\n        if (render_data.render_settings.do_alpha_testing)\n            alpha = get_hit_base_color_alpha(render_data, shadow_ray_hit);\n        else\n            alpha = 1.0f;\n\n        // Next ray origin\n        ray.origin = ray.origin + ray.direction * shadow_ray_hit.t;\n        cumulative_t += shadow_ray_hit.t;\n\n        // We keep going as long as the alpha is < 1.0f meaning that we hit texture transparency\n    } while (alpha < 1.0f && cumulative_t < t_max - 1.0e-4f);\n\n    bool hit_found = shadow_ray_hit.hasHit() && cumulative_t < t_max - 1.0e-4f;\n\n    if (hit_found)\n    {\n        // If we found a hit and that it is close enough (hit_found conditions)\n\n        int material_index = render_data.buffers.material_indices[shadow_ray_hit.primID];\n        int emission_texture_index = render_data.buffers.materials_buffer.get_emission_texture_index(material_index);\n\n        TriangleIndices triangle_vertex_indices = load_triangle_vertex_indices(render_data.buffers.triangles_indices, shadow_ray_hit.primID);\n        TriangleTexcoords triangle_texcoords = load_triangle_texcoords(render_data.buffers.texcoords, triangle_vertex_indices);\n        float2 interpolated_texcoords = uv_interpolate(triangle_texcoords, shadow_ray_hit.uv);\n\n        if (emission_texture_index != MaterialConstants::NO_TEXTURE)\n            out_light_hit_info.hit_emission = 
get_material_property<ColorRGB32F>(render_data, false, interpolated_texcoords, emission_texture_index);\n        else \n            out_light_hit_info.hit_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n\n        out_light_hit_info.hit_interpolated_texcoords = interpolated_texcoords;\n        out_light_hit_info.hit_shading_normal = get_shading_normal(render_data, hippt::normalize(shadow_ray_hit.normal), triangle_vertex_indices, triangle_texcoords, shadow_ray_hit.primID, shadow_ray_hit.uv, interpolated_texcoords);\n        out_light_hit_info.hit_geometric_normal = hippt::normalize(shadow_ray_hit.normal);\n        out_light_hit_info.hit_prim_index = shadow_ray_hit.primID;\n        out_light_hit_info.hit_material_index = material_index;\n        out_light_hit_info.hit_distance = cumulative_t;\n\n        return true;\n    }\n    else\n        return false;\n#endif // __KERNELCC__\n}\n\nHIPRT_DEVICE hiprtHit simple_closest_hit(const HIPRTRenderData& render_data, hiprtRay ray, int last_primitive_index, Xorshift32Generator& random_number_generator)\n{\n    hiprtHit hit;\n\n#ifdef __KERNELCC__\n    // Payload for the alpha testing filter function\n    FilterFunctionPayload payload;\n    payload.render_data = &render_data;\n    payload.random_number_generator = &random_number_generator;\n    payload.last_hit_primitive_index = last_primitive_index;\n\n#if UseSharedStackBVHTraversal == KERNEL_OPTION_TRUE\n#if SharedStackBVHTraversalSize > 0\n    hiprtSharedStackBuffer shared_stack_buffer{ SharedStackBVHTraversalSize, shared_stack_cache };\n#else\n    hiprtSharedStackBuffer shared_stack_buffer{ 0, nullptr };\n#endif\n    hiprtGlobalStack global_stack(render_data.global_traversal_stack_buffer, shared_stack_buffer);\n\n    hiprtGeomTraversalClosestCustomStack<hiprtGlobalStack> traversal(render_data.GPU_BVH, ray, global_stack, hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0);\n#else\n    hiprtGeomTraversalClosest 
traversal(render_data.GPU_BVH, ray, hiprtTraversalHintDefault, &payload, render_data.hiprt_function_table, 0);\n#endif\n\n    hit = traversal.getNextHit();\n#else\n    hit = intersect_scene_cpu(render_data, render_data.cpu_only.bvh, ray, last_primitive_index, random_number_generator);\n#endif\n\n    return hit;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/LightSampling/Envmap.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_ENVMAP_H\n#define DEVICE_ENVMAP_H\n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/Texture.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n/**\n * References:\n * \n * [1] [GLSL Path Tracer implementation by knightcrawler25] https://github.com/knightcrawler25/GLSL-PathTracer\n * [2] [PBR Book 3rd Ed - Infinite Light Sampling] https://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Sampling_Light_Sources\n */ \n\n/**\n * This function expects 'direction' to be in world space\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F eval_envmap_no_pdf(const WorldSettings& world_settings, const float3& direction)\n{\n    // Bringing the direction in envmap space for sampling the envmap\n    float3 rotated_direction = matrix_X_vec(world_settings.world_to_envmap_matrix, direction);\n\n    float u = 0.5f + atan2(rotated_direction.z, rotated_direction.x) * M_INV_2_PI;\n    float v = 0.5f + asin(rotated_direction.y) * M_INV_PI;\n\n    return sample_environment_map_texture(world_settings, make_float2(u, v));\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void envmap_cdf_search(const WorldSettings& world_settings, float value, int& x, int& y)\n{\n    //First searching a line to sample\n    unsigned int lower = 0;\n    int upper = world_settings.envmap_height - 1;\n\n    int x_index = world_settings.envmap_width - 1;\n    while (lower < upper)\n    {\n        int y_index = static_cast<int>(floorf((lower + upper) * 0.5f));\n        int env_map_index = y_index * world_settings.envmap_width + x_index;\n\n        if (value < 
world_settings.envmap_cdf[env_map_index])\n            upper = y_index;\n        else\n            lower = y_index + 1;\n    }\n    y = hippt::max(hippt::min(lower, world_settings.envmap_height), 0u);\n\n    //Then sampling the line itself\n    lower = 0;\n    upper = world_settings.envmap_width - 1;\n\n    int y_index = y;\n    while (lower < upper)\n    {\n        int x_idx = static_cast<int>(floorf((lower + upper) * 0.5f));\n        int env_map_index = y_index * world_settings.envmap_width + x_idx;\n\n        if (value < world_settings.envmap_cdf[env_map_index])\n            upper = x_idx;\n        else\n            lower = x_idx + 1;\n    }\n    x = hippt::max(hippt::min(lower, world_settings.envmap_width), 0u);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F envmap_sample(const WorldSettings& world_settings, float3& sampled_direction, float& envmap_pdf, Xorshift32Generator& random_number_generator)\n{\n#if EnvmapSamplingStrategy == ESS_NO_SAMPLING\n    envmap_pdf = 0.0f;\n\n    return ColorRGB32F();\n#endif\n\n    int x, y;\n    float env_map_total_sum = world_settings.envmap_total_sum;\n\n#if EnvmapSamplingStrategy == ESS_BINARY_SEARCH\n    // Importance sampling a texel of the envmap with a binary search on the CDF\n    envmap_cdf_search(world_settings, random_number_generator() * env_map_total_sum, x, y);\n#elif EnvmapSamplingStrategy == ESS_ALIAS_TABLE\n    int random_index = world_settings.envmap_alias_table.sample(random_number_generator);\n\n    y = static_cast<int>(floorf(random_index / static_cast<float>(world_settings.envmap_width)));\n    x = static_cast<int>(floorf(random_index - y * static_cast<float>(world_settings.envmap_width)));\n#endif\n\n    // Converting to UV coordinates\n    float u = static_cast<float>(x) / world_settings.envmap_width;\n    float v = static_cast<float>(y) / world_settings.envmap_height;\n\n    // Converting to polar coordinates\n    float phi = u * M_TWO_PI;\n    // Clamping because a theta of 0.0f would mean straight 
up which means singularity\n    // which means not good for numerical stability\n    float theta = hippt::max(1.0e-5f, v * M_PI);\n\n    // Convert to cartesian coordinates\n    float cos_theta = cos(theta);\n    float sin_theta = sin(theta);\n    // Using this formula here instead of the usual (sin_theta * cos(phi), sin_theta * sin(phi), cos_theta)\n    // because we want our envmap to be Y-up\n    sampled_direction = make_float3(-sin_theta * cos(phi), -cos_theta, -sin_theta * sin(phi));\n\n    // Taking envmap rotation into account to bring the direction in world space\n    sampled_direction = matrix_X_vec(world_settings.envmap_to_world_matrix, sampled_direction);\n\n    ColorRGB32F env_map_radiance = sample_environment_map_texture(world_settings, make_float2(u, v));\n    // Computing envmap PDF\n    envmap_pdf = 1.0f;\n#if EnvmapSamplingStrategy == ESS_BINARY_SEARCH || EnvmapSamplingStrategy == ESS_ALIAS_TABLE \n    // The texel was sampled according to its luminance\n    envmap_pdf = env_map_radiance.luminance() / (env_map_total_sum * world_settings.envmap_intensity);\n\n    // Account for the fact that the envmap texels have some area in the world\n    envmap_pdf *= world_settings.envmap_width * world_settings.envmap_height;\n#endif\n\n    // Converting the PDF from area measure on the envmap to solid angle measure\n    envmap_pdf /= (M_TWO_PI_SQUARED * sin_theta);\n\n    return env_map_radiance;\n}\n\n/**\n * This function expects the given direction to be in world space i.e.\n * the direction is already rotated by the envmap rotation matrix\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F envmap_eval(const HIPRTRenderData& render_data, const float3& direction, float& pdf)\n{\n#if EnvmapSamplingStrategy == ESS_NO_SAMPLING\n    pdf = 0.0f;\n\n    return ColorRGB32F();\n#endif\n\n    const WorldSettings& world_settings = render_data.world_settings;\n\n    ColorRGB32F envmap_radiance = eval_envmap_no_pdf(world_settings, direction);\n\n    float envmap_total_sum 
= world_settings.envmap_total_sum;\n\n    float theta_bsdf_dir = acos(-direction.y);\n    float sin_theta = sin(theta_bsdf_dir);\n\n#if EnvmapSamplingStrategy == ESS_BINARY_SEARCH || EnvmapSamplingStrategy == ESS_ALIAS_TABLE \n    // The texel was sampled according to its luminance\n    pdf = envmap_radiance.luminance() / (envmap_total_sum * render_data.world_settings.envmap_intensity);\n\n    // Account for the fact that the envmap texels have some area in the world\n    pdf *= world_settings.envmap_width * world_settings.envmap_height;\n#endif\n\n    // Converting from \"texel on envmap measure\" to solid angle\n    pdf /= (M_TWO_PI_SQUARED * sin_theta);\n\n    return envmap_radiance;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_environment_map_with_mis(HIPRTRenderData& render_data, RayPayload& ray_payload, HitInfo& closest_hit_info,\n    const float3& view_direction, \n    Xorshift32Generator& random_number_generator)\n{\n    float envmap_pdf;\n    float3 sampled_direction;\n    ColorRGB32F envmap_color = envmap_sample(render_data.world_settings, sampled_direction, envmap_pdf, random_number_generator);\n    ColorRGB32F envmap_mis_contribution;\n\n    if (MaterialUtils::can_do_light_sampling(ray_payload.material))\n    {\n        // Sampling the envmap with MIS\n        float cosine_term = hippt::dot(closest_hit_info.shading_normal, sampled_direction);\n        if (envmap_pdf > 0.0f && cosine_term > 0.0f)\n        {\n            hiprtRay shadow_ray;\n            shadow_ray.origin = closest_hit_info.inter_point;\n            shadow_ray.direction = sampled_direction;\n\n            NEEPlusPlusContext nee_plus_plus_context;\n            nee_plus_plus_context.shaded_point = closest_hit_info.inter_point;\n            nee_plus_plus_context.point_on_light = sampled_direction;\n            nee_plus_plus_context.envmap = true;\n            bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, 1.0e35f, closest_hit_info.primitive_index, 
nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n            if (!in_shadow)\n            {\n                float bsdf_pdf;\n                BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO; \n                BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, sampled_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, EnvmapSamplingDoBSDFMIS ? MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS : MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n                ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n#if EnvmapSamplingDoBSDFMIS\n                float mis_weight = balance_heuristic(envmap_pdf, bsdf_pdf);\n#else\n                float mis_weight = 1.0f;\n#endif\n\n                envmap_mis_contribution = bsdf_color * cosine_term * mis_weight * envmap_color / envmap_pdf / nee_plus_plus_context.unoccluded_probability;\n            }\n        }\n    }\n\n\n\n#if EnvmapSamplingDoBSDFMIS\n    float bsdf_sample_pdf;\n    float3 bsdf_sampled_dir;\n    ColorRGB32F bsdf_color;\n    ColorRGB32F bsdf_mis_contribution;\n\n    BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n    BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n    bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, bsdf_sampled_dir, bsdf_sample_pdf, random_number_generator);\n\n    // Sampling the BSDF with MIS\n    float cosine_term = hippt::abs(hippt::dot(closest_hit_info.shading_normal, 
bsdf_sampled_dir));\n    if (bsdf_sample_pdf > 0.0f)\n    {\n        hiprtRay shadow_ray;\n        shadow_ray.origin = closest_hit_info.inter_point;\n        shadow_ray.direction = bsdf_sampled_dir;\n\n        bool in_shadow = evaluate_shadow_ray_occluded(render_data, shadow_ray, 1.0e35f, closest_hit_info.primitive_index, ray_payload.bounce, random_number_generator);\n        if (!in_shadow)\n        {\n            float envmap_eval_pdf;\n            ColorRGB32F envmap_radiance = envmap_eval(render_data, bsdf_sampled_dir, envmap_eval_pdf);\n            if (envmap_eval_pdf > 0.0f)\n            {\n                float mis_weight = balance_heuristic(bsdf_sample_pdf, envmap_eval_pdf);\n                bsdf_mis_contribution = envmap_radiance * mis_weight * cosine_term * bsdf_color / bsdf_sample_pdf;\n            }\n        }\n    }\n\n    return bsdf_mis_contribution + envmap_mis_contribution;\n#else\n    return envmap_mis_contribution;\n#endif\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_environment_map(HIPRTRenderData& render_data, RayPayload& ray_payload, HitInfo& closest_hit_info, \n    const float3& view_direction, \n    Xorshift32Generator& random_number_generator)\n{\n    const WorldSettings& world_settings = render_data.world_settings;\n\n    if (world_settings.ambient_light_type != AmbientLightType::ENVMAP || render_data.bsdfs_data.white_furnace_mode)\n        // Not using the envmap\n        return ColorRGB32F(0.0f);\n\n    if (world_settings.envmap_intensity <= 0.0f)\n        // No need to sample the envmap if the user has set the intensity to 0\n        return ColorRGB32F(0.0f);\n\n    if (ray_payload.bounce == 0 && DirectLightSamplingStrategy == LSS_RESTIR_DI)\n        // The envmap lighting is handled by ReSTIR DI on the first bounce\n        return ColorRGB32F(0.0f);\n\n#if EnvmapSamplingStrategy == ESS_NO_SAMPLING\n    return ColorRGB32F(0.0f);\n#else\n    return sample_environment_map_with_mis(render_data, ray_payload, closest_hit_info, 
view_direction, random_number_generator);\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/LightSampling/LightUtils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_LIGHT_UTILS_H\n#define DEVICE_LIGHT_UTILS_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/LightSampling/PDFConversion.h\"\n#include \"Device/includes/ReSTIR/ReGIR/Settings.h\"\n#include \"Device/includes/ReSTIR/ReGIR/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/ReGIR/ShadingAdditionalInfo.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_DEVICE HIPRT_INLINE float3 get_triangle_normal_not_normalized(const HIPRTRenderData& render_data, int triangle_index)\n{\n    int triangle_index_start = triangle_index * 3;\n\n    float3 vertex_A = render_data.buffers.vertices_positions[render_data.buffers.triangles_indices[triangle_index_start + 0]];\n    float3 vertex_B = render_data.buffers.vertices_positions[render_data.buffers.triangles_indices[triangle_index_start + 1]];\n    float3 vertex_C = render_data.buffers.vertices_positions[render_data.buffers.triangles_indices[triangle_index_start + 2]];\n\n    float3 AB = vertex_B - vertex_A;\n    float3 AC = vertex_C - vertex_A;\n\n    return hippt::cross(AB, AC);\n}\n\nHIPRT_DEVICE HIPRT_INLINE float triangle_area(const HIPRTRenderData& render_data, int triangle_index)\n{\n    return render_data.buffers.triangles_areas[triangle_index];\n}\n\nHIPRT_DEVICE ColorRGB32F get_emission_of_triangle_from_index(const HIPRTRenderData& render_data, int triangle_index)\n{\n    return render_data.buffers.materials_buffer.get_emission(render_data.buffers.material_indices[triangle_index]);\n}\n\n/**\n * Returns the PDF (area measure) of the light sampler for the given triangle_hit_info\n *\n * 'primitive_index' is the index of the emissive triangle hit\n * 'shading_normal' is the shading normal at the intersection point of the emissive triangle hit\n * 
'hit_distance' is the distance to the intersection point on the hit triangle\n * 'ray_direction' is the direction of the ray that hit the triangle. The direction points towards the triangle.\n */\ntemplate <int lightSamplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_area_measure(const HIPRTRenderData& render_data, float light_area, ColorRGB32F light_emission)\n{\n    float hit_distance = 1.0f;\n    float area_measure_pdf;\n\n    // Note that for ReGIR, we cannot have the exact light PDF since ReGIR is based on RIS so we're\n    // faking it with whatever base strategy ReGIR is using\n\n    if constexpr (lightSamplingStrategy == LSS_BASE_UNIFORM)\n    {\n        // Surface area PDF of hitting that point on that triangle in the scene\n        area_measure_pdf = 1.0f / light_area;\n        area_measure_pdf /= render_data.buffers.emissive_triangles_count;\n    }\n    else if constexpr (lightSamplingStrategy == LSS_BASE_POWER)\n    {\n        area_measure_pdf = 1.0f / light_area;\n        area_measure_pdf *= (light_emission.luminance() * light_area) / render_data.buffers.emissives_power_alias_table.sum_elements;\n    }\n    else if constexpr (lightSamplingStrategy == LSS_BASE_REGIR)\n        // Faking the ReGIR PDF with the PDF of its base sampling strategy\n        area_measure_pdf = pdf_of_emissive_triangle_hit_area_measure<ReGIR_GridFillLightSamplingBaseStrategy>(render_data, light_area, light_emission);\n\n\n    return area_measure_pdf;\n}\n\ntemplate <int lightSamplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_area_measure(const HIPRTRenderData& render_data, int hit_primitive_index, ColorRGB32F light_emission)\n{\n    return pdf_of_emissive_triangle_hit_area_measure<lightSamplingStrategy>(render_data, triangle_area(render_data, hit_primitive_index), light_emission);\n}\n\ntemplate <int lightSamplingStrategy = 
DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_area_measure(const HIPRTRenderData& render_data, const BSDFLightSampleRayHitInfo& light_hit_info)\n{\n    return pdf_of_emissive_triangle_hit_area_measure<lightSamplingStrategy>(render_data, light_hit_info.hit_prim_index, light_hit_info.hit_emission);\n}\n\n/**\n * Returns the PDF (solid angle measure) of the light sampler for the given 'light_hit_info'\n *\n * Note that for light samplers that cannot be point-evaluated (ReGIR for example: we cannot compute a RIS PDF),\n * the returned PDF is an approximation\n *\n * 'primitive_index' is the index of the emissive triangle hit\n * 'shading_normal' is the shading normal at the intersection point of the emissive triangle hit\n * 'hit_distance' is the distance to the intersection point on the hit triangle\n * 'to_light_direction' is the direction of the ray that hit the triangle. The direction points towards the triangle.\n */\ntemplate <int lightSamplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_solid_angle(const HIPRTRenderData& render_data,\n    float light_area,\n    ColorRGB32F light_emission, float3 light_surface_normal,\n    float hit_distance, float3 to_light_direction)\n{\n    // abs() here to allow backfacing lights\n    // Without abs() here:\n    //  - We could be hitting the back of an emissive triangle (think of quad light hanging in the air)\n    //  --> triangle normal not facing the same way \n    //  --> cos_angle negative\n    float cosine_light_source = compute_cosine_term_at_light_source(light_surface_normal, -to_light_direction);\n\n    float pdf_area_measure = pdf_of_emissive_triangle_hit_area_measure<lightSamplingStrategy>(render_data, light_area, light_emission);\n\n    return area_to_solid_angle_pdf(pdf_area_measure, hit_distance, cosine_light_source);\n}\n\ntemplate <int lightSamplingStrategy = 
DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_solid_angle(const HIPRTRenderData& render_data, int hit_primitive_index,\n    ColorRGB32F light_emission, float3 light_surface_normal,\n    float hit_distance, float3 to_light_direction)\n{\n    return pdf_of_emissive_triangle_hit_solid_angle<lightSamplingStrategy>(render_data, triangle_area(render_data, hit_primitive_index),\n        light_emission, light_surface_normal, hit_distance, to_light_direction);\n}\n\ntemplate <int lightSamplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE float pdf_of_emissive_triangle_hit_solid_angle(const HIPRTRenderData& render_data, const BSDFLightSampleRayHitInfo& light_hit_info, float3 to_light_direction)\n{\n    return pdf_of_emissive_triangle_hit_solid_angle<lightSamplingStrategy>(render_data,\n        light_hit_info.hit_prim_index, light_hit_info.hit_emission, light_hit_info.hit_geometric_normal,\n        light_hit_info.hit_distance, to_light_direction);\n}\n\n/**\n * Reference: [A Low-Distortion Map Between Triangle and Square, Heitz, 2019]\n * \n * Maps a point in a square to a point in an arbitrary triangle\n */\nHIPRT_DEVICE HIPRT_INLINE float2 square_to_triangle(float& x, float& y)\n{\n    if (y > x)\n    {\n        x *= 0.5f;\n        y -= x;\n    }\n    else\n    {\n        y *= 0.5f;\n        x -= y;\n    }\n\n\treturn make_float2(x, y);\n}\n\n/**\n * Samples a point uniformly on the given triangle (given with the triangle index)\n *\n * Returns true if the sampling was successful, false otherwise (can fail if the triangle is way too small or degenerate)\n */\nHIPRT_DEVICE HIPRT_INLINE bool sample_point_on_generic_triangle(int global_triangle_index, const float3* vertices_positions, const int* triangles_indices, Xorshift32Generator& rng,\n    float3& out_sample_point, float3& out_sampled_triangle_normal, float& out_triangle_area)\n{\n    float3 vertex_A = 
vertices_positions[triangles_indices[global_triangle_index * 3 + 0]];\n    float3 vertex_B = vertices_positions[triangles_indices[global_triangle_index * 3 + 1]];\n    float3 vertex_C = vertices_positions[triangles_indices[global_triangle_index * 3 + 2]];\n\n    float rand_1 = rng();\n    float rand_2 = rng();\n\n#if TrianglePointSamplingStrategy == TRIANGLE_POINT_SAMPLING_TURK_1990\n    float sqrt_r1 = sqrt(rand_1);\n    float u = 1.0f - sqrt_r1;\n    float v = (1.0f - rand_2) * sqrt_r1;\n#elif TrianglePointSamplingStrategy == TRIANGLE_POINT_SAMPLING_HEITZ_2019\n    float2 remapped = square_to_triangle(rand_1, rand_2);\n\n    float u = remapped.x;\n    float v = remapped.y;\n#endif\n\n    float3 AB = vertex_B - vertex_A;\n    float3 AC = vertex_C - vertex_A;\n    float3 normal = hippt::cross(AB, AC);\n\n    float length_normal = hippt::length(normal);\n    // TODO the normal length check used to be for some NaNs that occured on degenerate triangles but doesn't seem to happen anymore\n    /*if (length_normal <= 1.0e-6f)\n        return false;*/\n\n    float3 random_point_on_triangle = vertex_A + AB * u + AC * v;\n    out_sample_point = random_point_on_triangle;\n    out_sampled_triangle_normal = normal / length_normal;\n    out_triangle_area = 0.5f * length_normal;\n\n    return true;\n}\n\n/**\n * Samples a point uniformly on the given emissive triangle.\n * The given 'emissive_triangle_index' must come from reading the 'emissive_triangle_indices' buffer of the scene.\n *\n * Returns true if the sampling was successful, false otherwise (can fail if the triangle is way too small or degenerate)\n * \n * !!!!! This function is a remnant of some tests. 
It's actually less performant than\n * sample_point_on_generic_triangle() because of more cache misses for some reason !!!!!\n */\nHIPRT_DEVICE HIPRT_INLINE bool sample_point_on_emissive_triangle(int emissive_triangle_index, \n    const PrecomputedEmissiveTrianglesDataSoADevice& petd, Xorshift32Generator& rng,\n    float3& out_sample_point, float3& out_sampled_triangle_normal, float& out_triangle_area)\n{\n    float rand_1 = rng();\n    float rand_2 = rng();\n\n#if TrianglePointSamplingStrategy == TRIANGLE_POINT_SAMPLING_TURK_1990\n    float sqrt_r1 = sqrt(rand_1);\n    float u = 1.0f - sqrt_r1;\n    float v = (1.0f - rand_2) * sqrt_r1;\n#elif TrianglePointSamplingStrategy == TRIANGLE_POINT_SAMPLING_HEITZ_2019\n    float2 remapped = square_to_triangle(rand_1, rand_2);\n\n    float u = remapped.x;\n    float v = remapped.y;\n#endif\n\n    float3 vertex_A = petd.triangles_A[emissive_triangle_index];\n    float3 AB = petd.triangles_AB[emissive_triangle_index];\n    float3 AC = petd.triangles_AC[emissive_triangle_index];\n    float3 normal = hippt::cross(AB, AC);\n\n    float length_normal = hippt::length(normal);\n    if (length_normal <= 1.0e-6f)\n        return false;\n\n    float3 random_point_on_triangle = vertex_A + AB * u + AC * v;\n    out_sample_point = random_point_on_triangle;\n    out_sampled_triangle_normal = normal / length_normal;\n    out_triangle_area = 0.5f * length_normal;\n\n    return true;\n}\n\n/**\n * The PDF is computed in area measure\n */\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle_uniform(const HIPRTRenderData& render_data, Xorshift32Generator& random_number_generator)\n{\n    if (render_data.buffers.emissive_triangles_count == 0)\n        return LightSampleInformation();\n        \n    LightSampleInformation light_sample;\n\n    int random_emissive_triangle_index = random_number_generator.random_index(render_data.buffers.emissive_triangles_count);\n    int triangle_index = 
render_data.buffers.emissive_triangles_primitive_indices[random_emissive_triangle_index];\n\n    float sampled_triangle_area;\n    float3 sampled_triangle_normal;\n    float3 random_point_on_triangle;\n\tif (!sample_point_on_generic_triangle(triangle_index, render_data.buffers.vertices_positions,\n\t\trender_data.buffers.triangles_indices, random_number_generator, random_point_on_triangle, sampled_triangle_normal, sampled_triangle_area))\n\t\treturn LightSampleInformation();\n\n    light_sample.emissive_triangle_index = triangle_index;\n    light_sample.light_source_normal = sampled_triangle_normal;\n    light_sample.light_area = sampled_triangle_area;\n    light_sample.emission = render_data.buffers.materials_buffer.get_emission(render_data.buffers.material_indices[triangle_index]);\n    light_sample.point_on_light = random_point_on_triangle;\n\n    // PDF of that point on that triangle\n    light_sample.area_measure_pdf = 1.0f / sampled_triangle_area;\n    // PDF of that triangle sampled uniformly amongst all emissive triangles\n    light_sample.area_measure_pdf /= render_data.buffers.emissive_triangles_count;\n\n    return light_sample;\n}\n\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle_power(const HIPRTRenderData& render_data, Xorshift32Generator& random_number_generator)\n{\n    if (render_data.buffers.emissive_triangles_count == 0)\n        return LightSampleInformation();\n\n    LightSampleInformation out_sample;\n    \n    int random_emissive_triangle_index = render_data.buffers.emissives_power_alias_table.sample(random_number_generator);\n    \n    int triangle_index = render_data.buffers.emissive_triangles_primitive_indices[random_emissive_triangle_index];\n\n    float sampled_triangle_area;\n    float3 sampled_triangle_normal;\n    float3 random_point_on_triangle;\n    if (!sample_point_on_generic_triangle(triangle_index, render_data.buffers.vertices_positions,\n        render_data.buffers.triangles_indices, 
random_number_generator, random_point_on_triangle, sampled_triangle_normal, sampled_triangle_area))\n        return LightSampleInformation();\n\n    out_sample.emissive_triangle_index = triangle_index;\n    out_sample.light_source_normal = sampled_triangle_normal;\n    out_sample.light_area = sampled_triangle_area;\n    out_sample.emission = render_data.buffers.materials_buffer.get_emission(render_data.buffers.material_indices[triangle_index]);\n    out_sample.point_on_light = random_point_on_triangle;\n\n    // PDF of that point on that triangle\n    out_sample.area_measure_pdf = 1.0f / sampled_triangle_area;\n    // PDF of sampling that triangle according to its power\n    out_sample.area_measure_pdf *= (out_sample.emission.luminance() * sampled_triangle_area) / render_data.buffers.emissives_power_alias_table.sum_elements;\n\n    return out_sample;\n}\n\n// Forward declaration for use in 'sample_one_emissive_triangle_regir' below\ntemplate <int samplingStrategy>\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle(const HIPRTRenderData& render_data,\n    const float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal,\n    int last_hit_primitive_index, RayPayload& ray_payload,\n    Xorshift32Generator& random_number_generator);\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, const ReGIRGridFillSurface& surface, bool primary_hit, float PDF_normalization, float3 point_on_light, float3 light_source_normal, ColorRGB32F emission, Xorshift32Generator& random_number_generator)\n{\n    float sample_PDF_unnormalized;\n    if constexpr (canonicalPDF)\n        sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, surface, primary_hit, emission, light_source_normal, point_on_light, random_number_generator);\n    else\n        sample_PDF_unnormalized = 
ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, surface, primary_hit, emission, light_source_normal, point_on_light, random_number_generator);\n\n    return sample_PDF_unnormalized / PDF_normalization;\n}\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, \n    const ReGIRGridFillSurface& surface, unsigned int grid_cell_index, bool primary_hit,\n    float3 point_on_light, float3 light_source_normal, ColorRGB32F emission, Xorshift32Generator& random_number_generator)\n{\n    float RIS_integral;\n    if constexpr (canonicalPDF)\n        RIS_integral = render_data.render_settings.regir_settings.get_canonical_pre_integration_factor(grid_cell_index, primary_hit);\n    else\n        RIS_integral = render_data.render_settings.regir_settings.get_non_canonical_pre_integration_factor(grid_cell_index, primary_hit);\n    if (RIS_integral == 0.0f)\n        RIS_integral = 1.0f;\n    if (!render_data.render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n        RIS_integral = 1.0f;\n\n    return ReGIR_get_reservoir_sample_ReGIR_PDF<canonicalPDF>(render_data, surface, primary_hit, RIS_integral, point_on_light, light_source_normal, emission, random_number_generator);\n}\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, float3 point_on_light, float3 light_source_normal, ColorRGB32F emission, unsigned int grid_cell_index, bool primary_hit, Xorshift32Generator& random_number_generator)\n{\n    if (emission.is_black())\n        return 0.0f;\n\n    ReGIRGridFillSurface surface = ReGIR_get_cell_surface(render_data, grid_cell_index, primary_hit);\n    return ReGIR_get_reservoir_sample_ReGIR_PDF<canonicalPDF>(render_data, surface, grid_cell_index, primary_hit, point_on_light, light_source_normal, emission, random_number_generator);\n}\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float 
ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, float3 point_on_light, float3 light_source_normal, ColorRGB32F emission, unsigned int grid_cell_index, float RIS_integral, bool primary_hit, Xorshift32Generator& random_number_generator)\n{\n    if (emission.is_black())\n        return 0.0f;\n\n    ReGIRGridFillSurface surface = ReGIR_get_cell_surface(render_data, grid_cell_index, primary_hit);\n    return ReGIR_get_reservoir_sample_ReGIR_PDF<canonicalPDF>(render_data, surface, primary_hit, RIS_integral, point_on_light, light_source_normal, emission, random_number_generator);\n}\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, const ReGIRReservoir& reservoir, unsigned int grid_cell_index, bool primary_hit, Xorshift32Generator& random_number_generator)\n{\n    if (reservoir.UCW <= 0.0f)\n        return 0.0f;\n\n    float3 point_on_light = reservoir.sample.point_on_light;\n    float3 light_source_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, reservoir.sample.emissive_triangle_index));\n    ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, reservoir.sample.emissive_triangle_index);\n\n    return ReGIR_get_reservoir_sample_ReGIR_PDF<canonicalPDF>(render_data, point_on_light, light_source_normal, emission, grid_cell_index, primary_hit, random_number_generator);\n}\n\ntemplate <bool canonicalPDF>\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_ReGIR_PDF(const HIPRTRenderData& render_data, const ReGIRReservoir& reservoir, unsigned int grid_cell_index, float RIS_integral, bool primary_hit, Xorshift32Generator& random_number_generator)\n{\n    if (reservoir.UCW <= 0.0f)\n        return 0.0f;\n\n    float3 point_on_light = reservoir.sample.point_on_light;\n    float3 light_source_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, reservoir.sample.emissive_triangle_index));\n    ColorRGB32F emission = 
get_emission_of_triangle_from_index(render_data, reservoir.sample.emissive_triangle_index);\n\n    return ReGIR_get_reservoir_sample_ReGIR_PDF<canonicalPDF>(render_data, point_on_light, light_source_normal, emission, grid_cell_index, RIS_integral, primary_hit, random_number_generator);\n}\n\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_BSDF_PDF(const HIPRTRenderData& render_data,\n\tfloat3 point_on_light, float3 light_source_normal, ColorRGB32F emission,\n    float3 view_direction, float3 shading_point, float3 shading_normal, float3 geometric_normal, BSDFIncidentLightInfo incident_light_info, RayPayload& ray_payload, int last_hit_primitive_index)\n{\n    if (emission.is_black())\n        return 0.0f;\n\n    float3 to_light_direction = point_on_light - shading_point;\n    float distance_to_light = hippt::length(to_light_direction);\n    to_light_direction /= distance_to_light; // Normalization\n\n    BSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, to_light_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n    float bsdf_pdf = bsdf_dispatcher_pdf(render_data, bsdf_context);\n\n    float area_measure_bsdf_pdf = solid_angle_to_area_pdf(bsdf_pdf, distance_to_light, compute_cosine_term_at_light_source(light_source_normal, -to_light_direction));\n\n    return area_measure_bsdf_pdf;\n}\n\nHIPRT_DEVICE float ReGIR_get_reservoir_sample_BSDF_PDF(const HIPRTRenderData& render_data, const ReGIRReservoir& reservoir,\n    float3 view_direction, float3 shading_point, float3 shading_normal, float3 geometric_normal, BSDFIncidentLightInfo incident_light_info, RayPayload& ray_payload, int last_hit_primitive_index, Xorshift32Generator& random_number_generator)\n{\n    if (reservoir.UCW <= 0.0f)\n        return 0.0f;\n\n    float3 point_on_light = reservoir.sample.point_on_light;\n    float3 
light_source_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, reservoir.sample.emissive_triangle_index));\n    ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, reservoir.sample.emissive_triangle_index);\n\n    return ReGIR_get_reservoir_sample_BSDF_PDF(render_data,\n        point_on_light, light_source_normal, emission, \n        view_direction, shading_point, shading_normal, geometric_normal, incident_light_info, ray_payload, last_hit_primitive_index);\n}\n\nstruct ReGIRPairwiseMIS\n{\n    HIPRT_DEVICE float compute_MIS_weight_normalization(const HIPRTRenderData& render_data, unsigned int valid_non_canonical_neighbors)\n    {\n        unsigned int number_of_samples = 0;\n        number_of_samples += valid_non_canonical_neighbors * render_data.render_settings.regir_settings.shading.reservoir_tap_count_per_neighbor; // non canonical samples\n        if (number_of_samples == 0)\n            return 0.0f;\n\n        return 1.0f / number_of_samples;\n    }\n\n    HIPRT_DEVICE void sum_non_canonical_sample_to_canonical_weights(const HIPRTRenderData& render_data, \n        float3 canonical_technique_1_point_on_light, float3 canonical_technique_1_light_normal, ColorRGB32F canonical_technique_1_emission,\n        float3 canonical_technique_2_point_on_light, float3 canonical_technique_2_light_normal, ColorRGB32F canonical_technique_2_emission,\n        float3 canonical_technique_3_point_on_light, float3 canonical_technique_3_light_normal, ColorRGB32F canonical_technique_3_emission,\n\n        float canonical_technique_1_canonical_reservoir_1_pdf, float canonical_technique_1_canonical_reservoir_2_pdf, float canonical_technique_1_canonical_reservoir_3_pdf,\n        float canonical_technique_2_canonical_reservoir_1_pdf, float canonical_technique_2_canonical_reservoir_2_pdf, float canonical_technique_2_canonical_reservoir_3_pdf,\n        float canonical_technique_3_canonical_reservoir_1_pdf, float 
canonical_technique_3_canonical_reservoir_2_pdf, float canonical_technique_3_canonical_reservoir_3_pdf,\n        float mis_weight_normalization,\n        \n        ReGIRGridFillSurface neighbor_surface, float neighbor_non_canonical_RIS_integral, bool is_primary_hit,\n        Xorshift32Generator& random_number_generator)\n    {\n        float non_canonical_neighbor_technique_canonical_reservoir_1_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, is_primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_1_point_on_light, canonical_technique_1_light_normal, canonical_technique_1_emission, random_number_generator);\n        m_sum_canonical_weight_1 += canonical_technique_1_canonical_reservoir_1_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_1_pdf + canonical_technique_1_canonical_reservoir_1_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_1_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_1_pdf * mis_weight_normalization);\n\n        float non_canonical_neighbor_technique_canonical_reservoir_2_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, is_primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_2_point_on_light, canonical_technique_2_light_normal, canonical_technique_2_emission, random_number_generator);\n        m_sum_canonical_weight_2 += canonical_technique_2_canonical_reservoir_2_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_2_pdf + canonical_technique_1_canonical_reservoir_2_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_2_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_2_pdf * mis_weight_normalization);\n\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n        float non_canonical_neighbor_technique_canonical_reservoir_3_pdf = 
ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, is_primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_3_point_on_light, canonical_technique_3_light_normal, canonical_technique_3_emission, random_number_generator);\n        m_sum_canonical_weight_3 += canonical_technique_3_canonical_reservoir_3_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_3_pdf + canonical_technique_1_canonical_reservoir_3_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_3_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_3_pdf * mis_weight_normalization);\n#endif\n    }\n\n    HIPRT_DEVICE float compute_MIS_weight_for_non_canonical_sample(const HIPRTRenderData& render_data,\n        float3 sample_point_on_light, float3 sample_light_source_normal, ColorRGB32F sample_emission,\n\n        float3 canonical_technique_1_point_on_light, float3 canonical_technique_1_light_normal, ColorRGB32F canonical_technique_1_emission,\n        float3 canonical_technique_2_point_on_light, float3 canonical_technique_2_light_normal, ColorRGB32F canonical_technique_2_emission,\n        float3 canonical_technique_3_point_on_light, float3 canonical_technique_3_light_normal, ColorRGB32F canonical_technique_3_emission,\n\n        const ReGIRGridFillSurface& center_grid_cell_surface, bool primary_hit,\n\n        float canonical_technique_1_canonical_reservoir_1_pdf, float canonical_technique_1_canonical_reservoir_2_pdf, float canonical_technique_1_canonical_reservoir_3_pdf,\n        float canonical_technique_2_canonical_reservoir_1_pdf, float canonical_technique_2_canonical_reservoir_2_pdf, float canonical_technique_2_canonical_reservoir_3_pdf,\n        float canonical_technique_3_canonical_reservoir_1_pdf, float canonical_technique_3_canonical_reservoir_2_pdf, float canonical_technique_3_canonical_reservoir_3_pdf,\n        float mis_weight_normalization,\n\n        float 
non_canonical_RIS_integral_center_grid_cell, float canonical_RIS_integral_center_grid_cell,\n        float non_canonical_sample_PDF,\n\n        ReGIRGridFillSurface neighbor_surface, float neighbor_non_canonical_RIS_integral,\n\n        float3 view_direction, float3 shading_point, float3 shading_normal, float3 geometric_normal, RayPayload& ray_payload, int last_hit_primitive_index,\n\n        Xorshift32Generator& random_number_generator)\n    {\n        float non_canonical_PDF = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, center_grid_cell_surface, primary_hit, non_canonical_RIS_integral_center_grid_cell, sample_point_on_light, sample_light_source_normal, sample_emission, random_number_generator);\n        float canonical_PDF = ReGIR_get_reservoir_sample_ReGIR_PDF<true>(render_data, center_grid_cell_surface, primary_hit, canonical_RIS_integral_center_grid_cell, sample_point_on_light, sample_light_source_normal, sample_emission, random_number_generator);\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n        float bsdf_PDF = ReGIR_get_reservoir_sample_BSDF_PDF(render_data, sample_point_on_light, sample_light_source_normal, sample_emission, view_direction, shading_point, shading_normal, geometric_normal, BSDFIncidentLightInfo::NO_INFO, ray_payload, last_hit_primitive_index);\n#else\n        float bsdf_PDF = 0.0f;\n#endif\n\n        float mis_weight = mis_weight_normalization * (non_canonical_sample_PDF / (non_canonical_sample_PDF + non_canonical_PDF * mis_weight_normalization + canonical_PDF * mis_weight_normalization + bsdf_PDF * mis_weight_normalization));\n\n        // Summing the weights for the canonical MIS weight computation\n        float non_canonical_neighbor_technique_canonical_reservoir_1_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_1_point_on_light, canonical_technique_1_light_normal, canonical_technique_1_emission, 
random_number_generator);\n        m_sum_canonical_weight_1 += canonical_technique_1_canonical_reservoir_1_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_1_pdf + canonical_technique_1_canonical_reservoir_1_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_1_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_1_pdf * mis_weight_normalization);\n\n        float non_canonical_neighbor_technique_canonical_reservoir_2_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_2_point_on_light, canonical_technique_2_light_normal, canonical_technique_2_emission, random_number_generator);\n        m_sum_canonical_weight_2 += canonical_technique_2_canonical_reservoir_2_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_2_pdf + canonical_technique_1_canonical_reservoir_2_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_2_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_2_pdf * mis_weight_normalization);\n\n        float non_canonical_neighbor_technique_canonical_reservoir_3_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, neighbor_surface, primary_hit, neighbor_non_canonical_RIS_integral, canonical_technique_3_point_on_light, canonical_technique_3_light_normal, canonical_technique_3_emission, random_number_generator);\n        m_sum_canonical_weight_3 += canonical_technique_3_canonical_reservoir_3_pdf * mis_weight_normalization / (non_canonical_neighbor_technique_canonical_reservoir_3_pdf + canonical_technique_1_canonical_reservoir_3_pdf * mis_weight_normalization + canonical_technique_2_canonical_reservoir_3_pdf * mis_weight_normalization + canonical_technique_3_canonical_reservoir_3_pdf * mis_weight_normalization);\n\n        return mis_weight;\n    }\n\n    HIPRT_DEVICE float 
get_canonical_MIS_weight_1(float canonical_technique_1_canonical_reservoir_1_pdf, float canonical_technique_2_canonical_reservoir_1_pdf, float canonical_technique_3_canonical_reservoir_1_pdf, float mis_weight_normalization)\n    {\n        if (mis_weight_normalization == 0.0f)\n            // We only have the canonical techniques available, we're going to go for a balance heuristic between them\n            return canonical_technique_1_canonical_reservoir_1_pdf / (canonical_technique_1_canonical_reservoir_1_pdf + canonical_technique_2_canonical_reservoir_1_pdf + canonical_technique_3_canonical_reservoir_1_pdf);\n\n        return m_sum_canonical_weight_1 * mis_weight_normalization;\n    }\n\n    HIPRT_DEVICE float get_canonical_MIS_weight_2(float canonical_technique_1_canonical_reservoir_2_pdf, float canonical_technique_2_canonical_reservoir_2_pdf, float canonical_technique_3_canonical_reservoir_2_pdf, float mis_weight_normalization)\n    {\n        if (mis_weight_normalization == 0.0f)\n            // We only have the canonical techniques available, we're going to go for a balance heuristic between them\n            return canonical_technique_2_canonical_reservoir_2_pdf / (canonical_technique_1_canonical_reservoir_2_pdf + canonical_technique_2_canonical_reservoir_2_pdf + canonical_technique_3_canonical_reservoir_2_pdf);\n\n        return m_sum_canonical_weight_2 * mis_weight_normalization;\n    }\n\n    HIPRT_DEVICE float get_canonical_MIS_weight_3(float canonical_technique_1_canonical_reservoir_3_pdf, float canonical_technique_2_canonical_reservoir_3_pdf, float canonical_technique_3_canonical_reservoir_3_pdf, float mis_weight_normalization)\n    {\n        if (mis_weight_normalization == 0.0f)\n            // We only have the canonical techniques available, we're going to go for a balance heuristic between them\n            return canonical_technique_3_canonical_reservoir_3_pdf / (canonical_technique_1_canonical_reservoir_3_pdf + 
canonical_technique_2_canonical_reservoir_3_pdf + canonical_technique_3_canonical_reservoir_3_pdf);\n\n        return m_sum_canonical_weight_3 * mis_weight_normalization;\n    }\n\n    // 1st is non-canonical samples\n    float m_sum_canonical_weight_1 = 0.0f;\n    // 2nd technique is canonical samples\n    float m_sum_canonical_weight_2 = 0.0f;\n    // 3rd technique is BSDF samples\n    float m_sum_canonical_weight_3 = 0.0f;\n};\n\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle_regir_with_info(\n    const HIPRTRenderData& render_data,\n    const float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal,\n    int last_hit_primitive_index, RayPayload& ray_payload,\n    bool& out_need_fallback_sampling,\n    Xorshift32Generator& random_number_generator,\n    ReGIRShadingAdditionalInfo& out_infos)\n{\n    // Starting with this at true and if we find a single good neighbor,\n    // this will be set to false\n    out_need_fallback_sampling = true;\n\n    float3 selected_point_on_light = make_float3(0.0f, 0.0f, 0.0f);\n    float3 selected_light_source_normal = make_float3(0.0f, 0.0f, 0.0f);\n    float selected_light_source_area = 0.0f;\n    BSDFIncidentLightInfo selected_incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n    ColorRGB32F selected_emission;\n\n    ReGIRReservoir out_reservoir;\n\n    // Some random seed to generate to positions of the neighbors (when jittering)\n    // XORing here because not XORing was causing RNG correlations issues...\n    // not sure how that works but more randomness here seems to be getting rid of those correlations issues\n    unsigned neighbor_rng_seed = random_number_generator.xorshift32() ^ random_number_generator.xorshift32();\n    unsigned non_cano_neighbor_rng_seed = neighbor_rng_seed ^ random_number_generator.xorshift32();\n    Xorshift32Generator non_canonical_neighbor_rng(non_cano_neighbor_rng_seed);\n    Xorshift32Generator 
neighbor_rng(neighbor_rng_seed);\n\n\tconst ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n    unsigned int valid_non_canonical_neighbors = 0;\n    for (int neighbor = 0; neighbor < regir_settings.shading.number_of_neighbors; neighbor++)\n    {\n        unsigned int neighbor_grid_cell_index = regir_settings.find_valid_jittered_neighbor_cell_index<false>(\n            shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n            regir_settings.shading.get_do_cell_jittering(regir_settings.compute_is_primary_hit(ray_payload)),\n            regir_settings.shading.jittering_radius, non_canonical_neighbor_rng);\n        if (neighbor_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n            // Not a valid neighbor\n            continue;\n        else\n            valid_non_canonical_neighbors++;\n    }\n    // Resetting the seed after the counting of the neighbors\n    non_canonical_neighbor_rng.m_state.seed = non_cano_neighbor_rng_seed;\n\n#if ReGIR_ShadingResamplingDoMISPairwiseMIS\n    {\n        ReGIRPairwiseMIS pairwise;\n\n        unsigned int canonical_grid_cell_index = regir_settings.find_valid_jittered_neighbor_cell_index<true>(\n            shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n            ReGIR_ShadingResamplingJitterCanonicalCandidates,\n            regir_settings.shading.jittering_radius, neighbor_rng);\n\n        float UCW_1 = 0.0f, UCW_2 = 0.0f;\n        int triangle_index_1 = -1, triangle_index_2 = -1, triangle_index_3 = -1;\n        float3 point_on_light_1, point_on_light_2, point_on_light_3 = make_float3(0.0f, 0.0f, 0.0f);\n        float3 light_source_normal_1, light_source_normal_2, light_source_normal_3 = make_float3(0.0f, 0.0f, 0.0f);\n        ColorRGB32F emission_1, emission_2, emission_3;\n\n        
BSDFIncidentLightInfo canonical_technique_3_sample_ili = BSDFIncidentLightInfo::NO_INFO;\n\n        ReGIRGridFillSurface center_cell_surface;\n\n        float canonical_technique_1_canonical_reservoir_1_pdf = 0.0f;\n        float canonical_technique_1_canonical_reservoir_2_pdf = 0.0f;\n        float canonical_technique_1_canonical_reservoir_3_pdf = 0.0f;\n        float canonical_technique_2_canonical_reservoir_1_pdf = 0.0f;\n        float canonical_technique_2_canonical_reservoir_2_pdf = 0.0f;\n        float canonical_technique_2_canonical_reservoir_3_pdf = 0.0f;\n        float canonical_technique_3_canonical_reservoir_1_pdf = 0.0f;\n        float canonical_technique_3_canonical_reservoir_2_pdf = 0.0f;\n        float canonical_technique_3_canonical_reservoir_3_pdf = 0.0f;\n        float mis_weight_normalization = pairwise.compute_MIS_weight_normalization(render_data, valid_non_canonical_neighbors);\n\n        float non_canonical_RIS_integral_center_grid_cell;\n        float canonical_RIS_integral_center_grid_cell;\n\n        // Fetching the center cell should never fail because the center cell always exists but it may actually fail in case of collisions\n        // that cannot be resolved\n        if (canonical_grid_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n        {\n            // We found at least one good sample so we're not going to need a fallback on another light sampling strategy than ReGIR\n            out_need_fallback_sampling = false;\n\n            // Producing the canonical techniques samples\n            {\n                ReGIRReservoir canonical_technique_1_reservoir = regir_settings.get_random_reservoir_in_grid_cell_for_shading<false>(canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n                ReGIRReservoir canonical_technique_2_reservoir = regir_settings.get_random_reservoir_in_grid_cell_for_shading<true>(canonical_grid_cell_index, 
regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n\n                if constexpr (ReGIR_ShadingResamplingDoBSDFMIS)\n                {\n                    float bsdf_sample_pdf;\n                    float3 sampled_bsdf_direction;\n\n                    BSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, make_float3(0.0f, 0.0f, 0.0f), canonical_technique_3_sample_ili, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n                    bsdf_dispatcher_sample(render_data, bsdf_context, sampled_bsdf_direction, bsdf_sample_pdf, random_number_generator);\n\n                    bool intersection_found = false;\n                    BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n                    if (bsdf_sample_pdf > 0.0f)\n                    {\n                        hiprtRay new_ray;\n                        new_ray.origin = shading_point;\n                        new_ray.direction = sampled_bsdf_direction;\n\n                        intersection_found = evaluate_bsdf_light_sample_ray_simplified(render_data, new_ray, 1.0e35f, shadow_light_ray_hit_info, last_hit_primitive_index, ray_payload.bounce, random_number_generator);\n\n                        // Checking that we did hit something and if we hit something,\n                        // it needs to be emissive\n                        if (intersection_found && !shadow_light_ray_hit_info.hit_emission.is_black())\n                        {\n                            triangle_index_3 = shadow_light_ray_hit_info.hit_prim_index;\n                            point_on_light_3 = shading_point + shadow_light_ray_hit_info.hit_distance * sampled_bsdf_direction;\n                            light_source_normal_3 = shadow_light_ray_hit_info.hit_geometric_normal;\n                            emission_3 = 
shadow_light_ray_hit_info.hit_emission;\n\n                            // We want ReGIR to produce PDFs that are in area measure so we're converting from solid angle to area measure here\n                            canonical_technique_3_canonical_reservoir_3_pdf = solid_angle_to_area_pdf(bsdf_sample_pdf, shadow_light_ray_hit_info.hit_distance, compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -sampled_bsdf_direction));\n                        }\n                    }\n                }\n\n                // Extracting the data of the canonical reservoirs 1 and 2\n                if (canonical_technique_1_reservoir.UCW > 0.0f)\n                {\n                    UCW_1 = canonical_technique_1_reservoir.UCW;\n                    triangle_index_1 = canonical_technique_1_reservoir.sample.emissive_triangle_index;\n                    point_on_light_1 = canonical_technique_1_reservoir.sample.point_on_light;\n                    light_source_normal_1 = hippt::normalize(get_triangle_normal_not_normalized(render_data, canonical_technique_1_reservoir.sample.emissive_triangle_index));\n                    emission_1 = get_emission_of_triangle_from_index(render_data, canonical_technique_1_reservoir.sample.emissive_triangle_index);\n                }\n\n                if (canonical_technique_2_reservoir.UCW > 0.0f)\n                {\n                    UCW_2 = canonical_technique_2_reservoir.UCW;\n                    triangle_index_2 = canonical_technique_2_reservoir.sample.emissive_triangle_index;\n                    point_on_light_2 = canonical_technique_2_reservoir.sample.point_on_light;\n                    light_source_normal_2 = hippt::normalize(get_triangle_normal_not_normalized(render_data, canonical_technique_2_reservoir.sample.emissive_triangle_index));\n                    emission_2 = get_emission_of_triangle_from_index(render_data, canonical_technique_2_reservoir.sample.emissive_triangle_index);\n                }\n        
    }\n\n            // Computing all the PDFs of the canonical techniques that we're going to need for pairwise MIS\n            {\n                if (!emission_1.is_black())\n                {\n                    // TODO we already have the canonical / non-canonical PDF normalization (fetched below) so we can use them because otherwise, that function fetches them again\n                    canonical_technique_1_canonical_reservoir_1_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, point_on_light_1, light_source_normal_1, emission_1, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n                    canonical_technique_2_canonical_reservoir_1_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<true>(render_data, point_on_light_1, light_source_normal_1, emission_1, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n                    canonical_technique_3_canonical_reservoir_1_pdf = ReGIR_get_reservoir_sample_BSDF_PDF(render_data, point_on_light_1, light_source_normal_1, emission_1, view_direction, shading_point, shading_normal, geometric_normal, BSDFIncidentLightInfo::NO_INFO, ray_payload, last_hit_primitive_index);\n#endif\n                }\n\n                if (!emission_2.is_black())\n                {\n                    canonical_technique_1_canonical_reservoir_2_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, point_on_light_2, light_source_normal_2, emission_2, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n                    canonical_technique_2_canonical_reservoir_2_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<true>(render_data, point_on_light_2, light_source_normal_2, emission_2, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n#if 
ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n                    canonical_technique_3_canonical_reservoir_2_pdf = ReGIR_get_reservoir_sample_BSDF_PDF(render_data, point_on_light_2, light_source_normal_2, emission_2, view_direction, shading_point, shading_normal, geometric_normal, BSDFIncidentLightInfo::NO_INFO, ray_payload, last_hit_primitive_index);\n#endif\n                }\n\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n                if (!emission_3.is_black())\n                {\n                    canonical_technique_1_canonical_reservoir_3_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<false>(render_data, point_on_light_3, light_source_normal_3, emission_3, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n                    canonical_technique_2_canonical_reservoir_3_pdf = ReGIR_get_reservoir_sample_ReGIR_PDF<true>(render_data, point_on_light_3, light_source_normal_3, emission_3, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n                    // This one has already been computed when sampling the BSDF sample\n                    // canonical_technique_3_canonical_reservoir_3_pdf....\n                }\n#endif\n            }\n\n            {\n                non_canonical_RIS_integral_center_grid_cell = regir_settings.get_non_canonical_pre_integration_factor(canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                if (non_canonical_RIS_integral_center_grid_cell == 0.0f)\n                    non_canonical_RIS_integral_center_grid_cell = 1.0f;\n                if (!regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                    non_canonical_RIS_integral_center_grid_cell = 1.0f;\n\n                canonical_RIS_integral_center_grid_cell = regir_settings.get_canonical_pre_integration_factor(canonical_grid_cell_index, 
regir_settings.compute_is_primary_hit(ray_payload));\n                if (canonical_RIS_integral_center_grid_cell == 0.0f)\n                    canonical_RIS_integral_center_grid_cell = 1.0f;\n                if (!regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                    canonical_RIS_integral_center_grid_cell = 1.0f;\n            }\n\n\t\t\tcenter_cell_surface = ReGIR_get_cell_surface(render_data, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n        }\n        else\n        {\n            // The center grid cell is invalid (must be because of hash grid collisions that couldn't be resolved)\n            out_need_fallback_sampling = true;\n\n            return LightSampleInformation();\n        }\n\n        for (int neighbor = 0; neighbor < regir_settings.shading.number_of_neighbors; neighbor++)\n        {\n            unsigned int neighbor_grid_cell_index = regir_settings.find_valid_jittered_neighbor_cell_index<false>(\n                shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n                regir_settings.shading.get_do_cell_jittering(regir_settings.compute_is_primary_hit(ray_payload)),\n                regir_settings.shading.jittering_radius, non_canonical_neighbor_rng);\n            if (neighbor_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n                // Couldn't find a valid neighbor\n                continue;\n            else\n                out_need_fallback_sampling = false;\n\n\t\t\tReGIRGridFillSurface neighbor_surface = ReGIR_get_cell_surface(render_data, neighbor_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n\n            float neighbor_RIS_integral = regir_settings.get_non_canonical_pre_integration_factor(neighbor_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n            if (neighbor_RIS_integral == 0.0f)\n                
neighbor_RIS_integral = 1.0f;\n            if (!regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                neighbor_RIS_integral = 1.0f;\n\n            for (int i = 0; i < regir_settings.shading.reservoir_tap_count_per_neighbor; i++)\n            {\n                // Will be set to true if the jittering causes the current shading point to be jittered out of the scene\n                ReGIRReservoir non_canonical_reservoir = regir_settings.get_random_reservoir_in_grid_cell_for_shading<false>(neighbor_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), neighbor_rng);\n                \n                if (non_canonical_reservoir.UCW <= 0.0f)\n                {\n                    // No valid sample in that reservoir\n\n                    pairwise.sum_non_canonical_sample_to_canonical_weights(render_data, \n                        point_on_light_1, light_source_normal_1, emission_1,\n                        point_on_light_2, light_source_normal_2, emission_2,\n                        point_on_light_3, light_source_normal_3, emission_3,\n\n                        canonical_technique_1_canonical_reservoir_1_pdf, canonical_technique_1_canonical_reservoir_2_pdf, canonical_technique_1_canonical_reservoir_3_pdf,\n                        canonical_technique_2_canonical_reservoir_1_pdf, canonical_technique_2_canonical_reservoir_2_pdf, canonical_technique_2_canonical_reservoir_3_pdf,\n                        canonical_technique_3_canonical_reservoir_1_pdf, canonical_technique_3_canonical_reservoir_2_pdf, canonical_technique_3_canonical_reservoir_3_pdf,\n                        mis_weight_normalization,\n\n                        neighbor_surface, neighbor_RIS_integral, regir_settings.compute_is_primary_hit(ray_payload), random_number_generator);\n\n                    continue;\n                }\n\n                float3 point_on_light = non_canonical_reservoir.sample.point_on_light;\n                float3 light_source_normal = 
get_triangle_normal_not_normalized(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n                float light_source_area = hippt::length(light_source_normal) * 0.5f;\n                light_source_normal /= light_source_area * 2.0f;\n                ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n\n                ColorRGB32F sample_radiance;\n                float target_function = ReGIR_shading_evaluate_target_function<\n                    ReGIR_ShadingResamplingTargetFunctionVisibility || ReGIR_ShadingResamplingShadeAllSamples,\n                    ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                        shading_point, view_direction, shading_normal, geometric_normal,\n                        last_hit_primitive_index, ray_payload,\n                        point_on_light, light_source_normal, emission, random_number_generator, sample_radiance);\n\n                float non_canonical_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, neighbor_surface, regir_settings.compute_is_primary_hit(ray_payload), emission, light_source_normal, point_on_light, random_number_generator);\n                float current_sample_PDF = non_canonical_sample_PDF_unnormalized / neighbor_RIS_integral;\n                float mis_weight = pairwise.compute_MIS_weight_for_non_canonical_sample(render_data,\n                    point_on_light, light_source_normal, emission,\n\n                    point_on_light_1, light_source_normal_1, emission_1,\n                    point_on_light_2, light_source_normal_2, emission_2,\n                    point_on_light_3, light_source_normal_3, emission_3,\n\n\t\t\t\t\tcenter_cell_surface, regir_settings.compute_is_primary_hit(ray_payload),\n\n                    canonical_technique_1_canonical_reservoir_1_pdf, canonical_technique_1_canonical_reservoir_2_pdf, 
canonical_technique_1_canonical_reservoir_3_pdf,\n                    canonical_technique_2_canonical_reservoir_1_pdf, canonical_technique_2_canonical_reservoir_2_pdf, canonical_technique_2_canonical_reservoir_3_pdf,\n                    canonical_technique_3_canonical_reservoir_1_pdf, canonical_technique_3_canonical_reservoir_2_pdf, canonical_technique_3_canonical_reservoir_3_pdf,\n\n                    mis_weight_normalization,\n\n                    non_canonical_RIS_integral_center_grid_cell, canonical_RIS_integral_center_grid_cell, current_sample_PDF,\n                    neighbor_surface, neighbor_RIS_integral,\n\n                    view_direction, shading_point, shading_normal, geometric_normal, ray_payload, last_hit_primitive_index,\n                    random_number_generator);\n\n                if (out_reservoir.stream_reservoir(mis_weight, target_function, non_canonical_reservoir, random_number_generator))\n                {\n                    selected_point_on_light = point_on_light;\n                    selected_light_source_normal = light_source_normal;\n                    selected_light_source_area = light_source_area;\n                    selected_emission = emission;\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_FALSE\n\t\t\t\t\tout_infos.sample_radiance = sample_radiance;\n#endif\n                }\n\t\t\t\t\t\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_TRUE\n                out_infos.sample_radiance += sample_radiance * non_canonical_reservoir.UCW * mis_weight;\n#endif\n            }\n        }\n\n        if (canonical_grid_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n        {\n            if (!emission_1.is_black())\n            {\n                float light_source_area = hippt::length(get_triangle_normal_not_normalized(render_data, triangle_index_1)) * 0.5f;\n\n                {\n                    // Adding visibility in the canonical sample target function's if we have visibility in the 
grid fill target function\n                    // or if we wwant visibility in the target function during shading resampling\n                    // or if we're shading all candidates because then we want the target function to produce\n                    // the radiance towards the shading point directly which means that we need the visibility in the target function\n                    ColorRGB32F sample_radiance;\n                    float target_function = ReGIR_shading_evaluate_target_function<ReGIR_GridFillTargetFunctionVisibility || ReGIR_ShadingResamplingTargetFunctionVisibility || ReGIR_ShadingResamplingShadeAllSamples, ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                        shading_point, view_direction, shading_normal, geometric_normal,\n                        last_hit_primitive_index, ray_payload,\n                        point_on_light_1, light_source_normal_1, emission_1, random_number_generator, sample_radiance);\n\n                    float RIS_integral = regir_settings.get_non_canonical_pre_integration_factor(canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n                    float non_canonical_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload),\n                        emission_1, light_source_normal_1, point_on_light_1, random_number_generator);\n                    float non_canonical_sample_PDF = non_canonical_sample_PDF_unnormalized / RIS_integral;\n\n                    float mis_weight = pairwise.get_canonical_MIS_weight_1(canonical_technique_1_canonical_reservoir_1_pdf, canonical_technique_2_canonical_reservoir_1_pdf, 
canonical_technique_3_canonical_reservoir_1_pdf, mis_weight_normalization);\n\n                    ReGIRReservoir canonical_technique_1_reservoir;\n                    canonical_technique_1_reservoir.sample.emissive_triangle_index = triangle_index_1;\n                    canonical_technique_1_reservoir.sample.point_on_light = point_on_light_1;\n                    canonical_technique_1_reservoir.UCW = UCW_1;\n                    if (out_reservoir.stream_reservoir(mis_weight, target_function, canonical_technique_1_reservoir, random_number_generator))\n                    {\n                        selected_point_on_light = point_on_light_1;\n                        selected_light_source_normal = light_source_normal_1;\n                        selected_light_source_area = light_source_area;\n                        selected_emission = emission_1;\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_FALSE\n                        out_infos.sample_radiance = sample_radiance;\n#endif\n                    }\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_TRUE\n                    out_infos.sample_radiance += sample_radiance * canonical_technique_1_reservoir.UCW * mis_weight;\n#endif\n                }\n            }\n        }\n\n        // Incorporating a canonical candidate if doing visibility reuse because visibility reuse\n        // may cause the grid cell to produce no valid reservoir at all so we need canonical samples to\n        // cover those cases for unbiased results\n        // \n        // Fetching the center cell should never fail because the center cell always exists but it may actually fail in case of collisions\n        // that cannot be resolved\n        if (canonical_grid_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n        {\n            if (!emission_2.is_black())\n            {\n                float light_source_area = hippt::length(get_triangle_normal_not_normalized(render_data, triangle_index_2)) * 
0.5f;\n\n                {\n                    // Adding visibility in the canonical sample target function's if we have visibility in the grid fill target function\n                    // or if we wwant visibility in the target function during shading resampling\n                    // or if we're shading all candidates because then we want the target function to produce\n                    // the radiance towards the shading point directly which means that we need the visibility in the target function\n                    ColorRGB32F sample_radiance;\n                    float target_function = ReGIR_shading_evaluate_target_function<ReGIR_GridFillTargetFunctionVisibility || ReGIR_ShadingResamplingTargetFunctionVisibility || ReGIR_ShadingResamplingShadeAllSamples, ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                        shading_point, view_direction, shading_normal, geometric_normal,\n                        last_hit_primitive_index, ray_payload,\n                        point_on_light_2, light_source_normal_2, emission_2, random_number_generator, sample_radiance);\n\n                    float RIS_integral = regir_settings.get_canonical_pre_integration_factor(canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n                    float canonical_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, canonical_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload),\n                        emission_2, light_source_normal_2, point_on_light_2, random_number_generator);\n                    float canonical_sample_PDF = canonical_sample_PDF_unnormalized / RIS_integral;\n\n                    float mis_weight = 
pairwise.get_canonical_MIS_weight_2(canonical_technique_1_canonical_reservoir_2_pdf, canonical_technique_2_canonical_reservoir_2_pdf, canonical_technique_3_canonical_reservoir_2_pdf, mis_weight_normalization);\n\n                    ReGIRReservoir canonical_technique_2_reservoir;\n                    canonical_technique_2_reservoir.sample.emissive_triangle_index = triangle_index_2;\n                    canonical_technique_2_reservoir.sample.point_on_light = point_on_light_2;\n                    canonical_technique_2_reservoir.UCW = UCW_2;\n                    if (out_reservoir.stream_reservoir(mis_weight, target_function, canonical_technique_2_reservoir, random_number_generator))\n                    {\n                        selected_point_on_light = point_on_light_2;\n                        selected_light_source_normal = light_source_normal_2;\n                        selected_light_source_area = light_source_area;\n                        selected_emission = emission_2;\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_FALSE\n                        out_infos.sample_radiance = sample_radiance;\n#endif\n                    }\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_TRUE\n                    out_infos.sample_radiance += sample_radiance * canonical_technique_2_reservoir.UCW * mis_weight;\n#endif\n                }\n            }\n        }\n\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n        if (canonical_technique_3_canonical_reservoir_3_pdf > 0.0f)\n        {\n            float mis_weight = pairwise.get_canonical_MIS_weight_3(canonical_technique_1_canonical_reservoir_3_pdf, canonical_technique_2_canonical_reservoir_3_pdf, canonical_technique_3_canonical_reservoir_3_pdf, mis_weight_normalization);\n\n            ColorRGB32F sample_radiance;\n            float target_function = ReGIR_shading_evaluate_target_function<ReGIR_ShadingResamplingTargetFunctionVisibility || ReGIR_ShadingResamplingShadeAllSamples,\n    
            ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                    shading_point, view_direction, shading_normal, geometric_normal, last_hit_primitive_index,\n                    ray_payload, point_on_light_3, light_source_normal_3, emission_3,\n                    random_number_generator, sample_radiance, canonical_technique_3_sample_ili);\n\n            if (out_reservoir.stream_sample_raw(mis_weight, target_function, canonical_technique_3_canonical_reservoir_3_pdf, triangle_index_3, point_on_light_3, random_number_generator))\n            {\n                selected_point_on_light = point_on_light_3;\n                selected_light_source_normal = light_source_normal_3;\n                selected_light_source_area = hippt::length(get_triangle_normal_not_normalized(render_data, triangle_index_3)) * 0.5f;\n                selected_emission = emission_3;\n                selected_incident_light_info = canonical_technique_3_sample_ili;\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_FALSE\n                out_infos.sample_radiance = sample_radiance;\n#endif\n            }\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_TRUE\n            out_infos.sample_radiance += sample_radiance / canonical_technique_3_canonical_reservoir_3_pdf * mis_weight;\n#endif\n        }\n#endif\n\n        if (out_reservoir.weight_sum == 0.0f || out_need_fallback_sampling)\n            return LightSampleInformation();\n\n        out_reservoir.finalize_resampling(1.0f, 1.0f);\n    }\n#else\n    {\n        int selected_neighbor = -1;\n\n        for (int neighbor = 0; neighbor < render_data.render_settings.regir_settings.shading.number_of_neighbors; neighbor++)\n        {\n            unsigned int neighbor_grid_cell_index = render_data.render_settings.regir_settings.find_valid_jittered_neighbor_cell_index<false>(\n                shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, 
regir_settings.compute_is_primary_hit(ray_payload),\n                render_data.render_settings.regir_settings.shading.get_do_cell_jittering(regir_settings.compute_is_primary_hit(ray_payload)),\n                render_data.render_settings.regir_settings.shading.jittering_radius, neighbor_rng);\n            if (neighbor_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n                // Couldn't find a valid neighbor\n                continue;\n            else\n                out_need_fallback_sampling = false;\n\n            for (int i = 0; i < render_data.render_settings.regir_settings.shading.reservoir_tap_count_per_neighbor; i++)\n            {\n                // Will be set to true if the jittering causes the current shading point to be jittered out of the scene\n                ReGIRReservoir non_canonical_reservoir = render_data.render_settings.regir_settings.get_random_reservoir_in_grid_cell_for_shading<false>(neighbor_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), neighbor_rng);\n\n                if (non_canonical_reservoir.UCW <= 0.0f)\n                    // No valid sample in that reservoir\n                    continue;\n\n                float3 point_on_light = non_canonical_reservoir.sample.point_on_light;\n                float3 light_source_normal = get_triangle_normal_not_normalized(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n                float light_source_area = hippt::length(light_source_normal) * 0.5f;\n                light_source_normal /= light_source_area * 2.0f;\n\n                ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n                float target_function = ReGIR_shading_evaluate_target_function<\n                    ReGIR_ShadingResamplingTargetFunctionVisibility,\n                    ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                     
   shading_point, view_direction, shading_normal, geometric_normal,\n                        last_hit_primitive_index, ray_payload,\n                        point_on_light, light_source_normal,\n                        emission, random_number_generator);\n\n                float mis_weight = 1.0f;\n\n                if (out_reservoir.stream_reservoir(mis_weight, target_function, non_canonical_reservoir, random_number_generator))\n                {\n                    selected_neighbor = neighbor;\n\n                    selected_point_on_light = point_on_light;\n                    selected_light_source_normal = light_source_normal;\n                    selected_light_source_area = light_source_area;\n                    selected_emission = emission;\n                }\n            }\n        }\n\n        out_need_fallback_sampling = false;\n\n        float bsdf_sample_pdf;\n        float3 sampled_bsdf_direction;\n        BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE\n        BSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, make_float3(0.0f, 0.0f, 0.0f), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n        ColorRGB32F bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, sampled_bsdf_direction, bsdf_sample_pdf, random_number_generator);\n\n        bool intersection_found = false;\n        BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n        if (bsdf_sample_pdf > 0.0f)\n        {\n            hiprtRay new_ray;\n            new_ray.origin = shading_point;\n            new_ray.direction = sampled_bsdf_direction;\n\n            intersection_found = evaluate_bsdf_light_sample_ray_simplified(render_data, new_ray, 1.0e35f, shadow_light_ray_hit_info, last_hit_primitive_index, 
ray_payload.bounce, random_number_generator);\n\n            // Checking that we did hit something and if we hit something,\n            // it needs to be emissive\n            if (intersection_found && !shadow_light_ray_hit_info.hit_emission.is_black())\n            {\n                LightSampleInformation light_sample;\n                light_sample.emission = shadow_light_ray_hit_info.hit_emission;\n                light_sample.emissive_triangle_index = shadow_light_ray_hit_info.hit_prim_index;\n                light_sample.light_area = triangle_area(render_data, shadow_light_ray_hit_info.hit_prim_index);\n                light_sample.light_source_normal = shadow_light_ray_hit_info.hit_geometric_normal;\n                light_sample.point_on_light = shading_point + shadow_light_ray_hit_info.hit_distance * sampled_bsdf_direction;\n\n                float mis_weight = 1.0f;\n                float target_function = ReGIR_shading_evaluate_target_function<\n                    ReGIR_ShadingResamplingTargetFunctionVisibility,\n                    ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                        shading_point, view_direction, shading_normal, geometric_normal, last_hit_primitive_index,\n                        ray_payload, light_sample.point_on_light, light_sample.light_source_normal, light_sample.emission,\n                        random_number_generator, incident_light_info);\n\n                float area_measure_bsdf_pdf = solid_angle_to_area_pdf(bsdf_sample_pdf, shadow_light_ray_hit_info.hit_distance, compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -sampled_bsdf_direction));\n                if (out_reservoir.stream_sample(mis_weight, target_function, area_measure_bsdf_pdf, light_sample, random_number_generator))\n                {\n                    selected_neighbor = render_data.render_settings.regir_settings.shading.number_of_neighbors;\n\n                    
selected_point_on_light = light_sample.point_on_light;\n                    selected_light_source_normal = shadow_light_ray_hit_info.hit_geometric_normal;\n                    selected_light_source_area = light_sample.light_area;\n                    selected_emission = shadow_light_ray_hit_info.hit_emission;\n                    selected_incident_light_info = incident_light_info;\n                }\n            }\n        }\n#endif\n\n        bool need_canonical = (ReGIR_GridFillTargetFunctionVisibility || ReGIR_GridFillTargetFunctionCosineTerm || ReGIR_GridFillTargetFunctionCosineTermLightSource);\n        if (need_canonical)\n        {\n            // Will be set to true if the jittering causes the current shading point to be jittered out of the scene\n            unsigned int neighbor_grid_cell_index = render_data.render_settings.regir_settings.find_valid_jittered_neighbor_cell_index<true>(\n                shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n                false, // render_data.render_settings.regir_settings.shading.do_cell_jittering,\n                render_data.render_settings.regir_settings.shading.jittering_radius, neighbor_rng);\n\n            // Fetching the center cell should never fail because the center cell always exists but it may actually fail in case of collisions\n            // that cannot be resolved\n            if (neighbor_grid_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n            {\n                // We found at least one good sample so we're not going to need a fallback on another light sampling strategy than ReGIR\n                out_need_fallback_sampling = false;\n\n                ReGIRReservoir canonical_reservoir = render_data.render_settings.regir_settings.get_random_reservoir_in_grid_cell_for_shading<true>(neighbor_grid_cell_index, regir_settings.compute_is_primary_hit(ray_payload), neighbor_rng);\n\n    
            if (canonical_reservoir.UCW > 0.0f && canonical_reservoir.UCW != ReGIRReservoir::UNDEFINED_UCW)\n                {\n                    ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, canonical_reservoir.sample.emissive_triangle_index);\n\n                    float3 point_on_light = canonical_reservoir.sample.point_on_light;\n                    float3 light_source_normal = get_triangle_normal_not_normalized(render_data, canonical_reservoir.sample.emissive_triangle_index);\n                    float light_source_area = hippt::length(light_source_normal) * 0.5f;\n                    light_source_normal /= light_source_area * 2.0f;\n\n                    {\n                        // Adding visibility in the canonical sample target function's if we have visibility in the grid fill target function\n                        // or if we wwant visibility in the target function during shading resampling\n                        // or if we're shading all candidates because then we want the target function to produce\n                        // the radiance towards the shading point directly which means that we need the visibility in the target function\n                        float target_function = ReGIR_shading_evaluate_target_function<ReGIR_GridFillTargetFunctionVisibility || ReGIR_ShadingResamplingTargetFunctionVisibility, ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility>(render_data,\n                            shading_point, view_direction, shading_normal, geometric_normal,\n                            last_hit_primitive_index, ray_payload,\n                            point_on_light, light_source_normal,\n                            emission, random_number_generator);\n\n                        float mis_weight = 1.0f;\n                        if (out_reservoir.stream_reservoir(mis_weight, target_function, canonical_reservoir, random_number_generator))\n                        {\n                            
selected_neighbor = render_data.render_settings.regir_settings.shading.number_of_neighbors + ReGIR_ShadingResamplingDoBSDFMIS;\n\n                            selected_point_on_light = point_on_light;\n                            selected_light_source_normal = light_source_normal;\n                            selected_light_source_area = light_source_area;\n                            selected_emission = emission;\n                        }\n                    }\n                }\n            }\n        }\n\n        if (out_reservoir.weight_sum == 0.0f || out_need_fallback_sampling)\n            return LightSampleInformation();\n\n        neighbor_rng.m_state.seed = neighbor_rng_seed;\n\n        float normalization_denominator = 0.0f;\n        float normalization_numerator = 0.0f;\n        for (int i = 0; i < render_data.render_settings.regir_settings.shading.number_of_neighbors + need_canonical + ReGIR_ShadingResamplingDoBSDFMIS; i++)\n        {\n            bool is_bsdf_sample = (i == render_data.render_settings.regir_settings.shading.number_of_neighbors && ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE);\n            bool is_canonical = i == render_data.render_settings.regir_settings.shading.number_of_neighbors + ReGIR_ShadingResamplingDoBSDFMIS && need_canonical;\n\n            unsigned int neighbor_cell_index;\n            if (is_bsdf_sample)\n            {\n                neighbor_cell_index = render_data.render_settings.regir_settings.get_neighbor_replay_hash_grid_cell_index_for_shading(\n                    shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n                    false,\n                    false,\n                    render_data.render_settings.regir_settings.shading.jittering_radius,\n                    neighbor_rng);\n            }\n            else\n            {\n                neighbor_cell_index = 
render_data.render_settings.regir_settings.get_neighbor_replay_hash_grid_cell_index_for_shading(\n                    shading_point, geometric_normal, render_data.current_camera, ray_payload.material.roughness, regir_settings.compute_is_primary_hit(ray_payload),\n                    is_canonical,\n                    render_data.render_settings.regir_settings.shading.get_do_cell_jittering(regir_settings.compute_is_primary_hit(ray_payload)),\n                    render_data.render_settings.regir_settings.shading.jittering_radius,\n                    neighbor_rng);\n            }\n\n            if (neighbor_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n                // Outside of the alive grid\n                //\n                // Note that this also applies for the canonical sample because canonical samples are gathered\n                // from neighbors. But if the neighbor is outside of the grid (or in a non-alive grid cell), then\n                // we have no canonical neighbor to count in the MIS weights\n                continue;\n\n            if (selected_neighbor == i)\n            {\n                if (is_canonical)\n                {\n                    float RIS_integral = render_data.render_settings.regir_settings.get_canonical_pre_integration_factor(neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!render_data.render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n\n                    float light_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload),\n                        selected_emission, selected_light_source_normal, selected_point_on_light, random_number_generator);\n                    float light_sample_PDF = 
light_sample_PDF_unnormalized / RIS_integral;\n\n                    normalization_numerator = light_sample_PDF;\n                    normalization_denominator += light_sample_PDF;\n                }\n                else if (is_bsdf_sample)\n                {\n                    float bsdf_pdf;\n                    BSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, hippt::normalize(out_reservoir.sample.point_on_light - shading_point), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n                    ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n                    bsdf_pdf = solid_angle_to_area_pdf(bsdf_pdf, hippt::length(out_reservoir.sample.point_on_light - shading_point), compute_cosine_term_at_light_source(selected_light_source_normal, -hippt::normalize(out_reservoir.sample.point_on_light - shading_point)));\n\n                    normalization_numerator = bsdf_pdf;\n                    normalization_denominator += bsdf_pdf;\n                }\n                else\n                {\n                    // Non-canonical sample\n\n                    float RIS_integral = render_data.render_settings.regir_settings.get_non_canonical_pre_integration_factor(neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!render_data.render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n\n                    float light_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload),\n                        selected_emission, 
selected_light_source_normal, selected_point_on_light, random_number_generator);\n                    float light_sample_PDF = light_sample_PDF_unnormalized / RIS_integral;\n\n                    normalization_numerator = light_sample_PDF;\n                    normalization_denominator += light_sample_PDF * render_data.render_settings.regir_settings.shading.reservoir_tap_count_per_neighbor;\n                }\n\n                continue;\n            }\n            else\n            {\n                if (is_canonical)\n                {\n                    float RIS_integral = render_data.render_settings.regir_settings.get_canonical_pre_integration_factor(neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!render_data.render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n\n                    float light_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload),\n                        selected_emission, selected_light_source_normal, selected_point_on_light, random_number_generator);\n                    float light_sample_PDF = light_sample_PDF_unnormalized / RIS_integral;\n\n                    normalization_denominator += light_sample_PDF;\n                }\n                else if (is_bsdf_sample)\n                {\n                    float bsdf_pdf;\n                    BSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, hippt::normalize(out_reservoir.sample.point_on_light - shading_point), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n                    ColorRGB32F bsdf_color = 
bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n                    bsdf_pdf = solid_angle_to_area_pdf(bsdf_pdf, hippt::length(out_reservoir.sample.point_on_light - shading_point), compute_cosine_term_at_light_source(selected_light_source_normal, -hippt::normalize(out_reservoir.sample.point_on_light - shading_point)));\n\n                    normalization_denominator += bsdf_pdf;\n                }\n                else\n                {\n                    // Non-canonical sample\n\n                    float RIS_integral = render_data.render_settings.regir_settings.get_non_canonical_pre_integration_factor(neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload));\n                    if (RIS_integral == 0.0f)\n                        RIS_integral = 1.0f;\n                    if (!render_data.render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION)\n                        RIS_integral = 1.0f;\n\n                    float light_sample_PDF_unnormalized = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, neighbor_cell_index, regir_settings.compute_is_primary_hit(ray_payload), \n                        selected_emission, selected_light_source_normal, selected_point_on_light, random_number_generator);\n                    float light_sample_PDF = light_sample_PDF_unnormalized / RIS_integral;\n\n                    normalization_denominator += light_sample_PDF * render_data.render_settings.regir_settings.shading.reservoir_tap_count_per_neighbor;\n                }\n            }\n        }\n\n        out_reservoir.finalize_resampling(normalization_numerator, normalization_denominator);\n    }\n#endif\n\n    LightSampleInformation out_sample;\n\n    // The UCW is the inverse of the PDF but we expect the PDF to be in 'area_measure_pdf', not the inverse PDF (UCW), so we invert it\n    out_sample.area_measure_pdf = 1.0f / out_reservoir.UCW;\n    out_sample.emissive_triangle_index = 
out_reservoir.sample.emissive_triangle_index;\n    out_sample.emission = selected_emission;\n    out_sample.light_area = selected_light_source_area;\n    out_sample.light_source_normal = selected_light_source_normal;\n    out_sample.point_on_light = selected_point_on_light;\n    out_sample.incident_light_info = selected_incident_light_info;\n\n    return out_sample;\n}\n\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle_regir(\n    const HIPRTRenderData& render_data,\n    const float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal,\n    int last_hit_primitive_index, RayPayload& ray_payload,\n    bool& out_need_fallback_sampling,\n    Xorshift32Generator& random_number_generator)\n{\n    ReGIRShadingAdditionalInfo trash_info;\n    return sample_one_emissive_triangle_regir_with_info(render_data, shading_point, view_direction, shading_normal, geometric_normal,\n        last_hit_primitive_index, ray_payload, out_need_fallback_sampling, random_number_generator, trash_info);\n}\n\ntemplate <int samplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle(const HIPRTRenderData& render_data, \n    const float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal, \n    int last_hit_primitive_index, RayPayload& ray_payload,\n    Xorshift32Generator& random_number_generator)\n{\n    if constexpr (samplingStrategy == LSS_BASE_UNIFORM)\n    {\n        return sample_one_emissive_triangle_uniform(render_data, random_number_generator);\n    }\n    else if constexpr (samplingStrategy == LSS_BASE_POWER)\n    {\n        return sample_one_emissive_triangle_power(render_data, random_number_generator);\n    }\n    else if constexpr (samplingStrategy == LSS_BASE_REGIR)\n    {\n        bool point_outside_grid = false;\n\n        LightSampleInformation light_sample = 
sample_one_emissive_triangle_regir(render_data,\n            shading_point, view_direction, shading_normal, geometric_normal, \n            last_hit_primitive_index, ray_payload,\n            point_outside_grid,\n            random_number_generator);\n\n        if (!point_outside_grid)\n            return light_sample;\n        else\n        {\n#if ReGIR_FallbackLightSamplingStrategy == LSS_BASE_REGIR\n            // Invalid fallback strategy\n            invalid ReGIR light sampling fallback strategy\n#endif\n\n            // Fallback method as the point was outside of the ReGIR grid\n            return sample_one_emissive_triangle<ReGIR_FallbackLightSamplingStrategy>(render_data,\n                shading_point, view_direction, shading_normal, geometric_normal, \n                last_hit_primitive_index, ray_payload,\n                random_number_generator);\n        }\n    }\n}\n\n/**\n * Overload of the function used when sampling lights without a world shading point (as in ReSTIR DI light presampling for example)\n * \n * This means that positional light sampling schemes such as ReGIR or light trees cannot be used as the template argument here\n * and will produced incorrect results if used anyways\n */\ntemplate <int samplingStrategy = DirectLightSamplingBaseStrategy>\nHIPRT_DEVICE HIPRT_INLINE LightSampleInformation sample_one_emissive_triangle(const HIPRTRenderData& render_data, Xorshift32Generator& random_number_generator)\n{\n    RayPayload dummy_ray_payload;\n\n    return sample_one_emissive_triangle<samplingStrategy>(render_data,\n        make_float3(0.0f, 0.0f, 0.0f), make_float3(0.0f, 0.0f, 0.0f), make_float3(0.0f, 0.0f, 0.0f), make_float3(0.0f, 0.0f, 0.0f),\n        -1, dummy_ray_payload,\n        random_number_generator);\n}\n\n/**\n * 'clamp_condition' is an additional condition that needs to be met\n * for clamping to occur. 
If the additional condition is not met (the boolean\n * 'clamp_condition' is false, then the 'light_contribution' parameter is returned\n * untouched\n */\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F clamp_light_contribution(ColorRGB32F light_contribution, float clamp_max_value, bool clamp_condition)\n{\n    if (!light_contribution.has_nan() && clamp_max_value > 0.0f && clamp_condition)\n        // We don't want to clamp NaNs because that's UB (kind of) and the NaNs get\n        // immediately clamped to 'clamp_max_value' in my experience\n        //\n        // Not clamping the negatives to 0 because\n        // spectral rendering (for dispersion for example) may produce negative values\n        // and we don't want to clamp those to 0\n        light_contribution.clamp(-clamp_max_value, clamp_max_value);\n\n    return light_contribution;\n}\n\n/**\n * Returns true if the given contribution satisfies the minimum light contribution\n * required for a light to be \n */\nHIPRT_DEVICE HIPRT_INLINE bool check_minimum_light_contribution(float minimum_contribution, const ColorRGB32F& contribution)\n{\n    if (minimum_contribution > 0.0f)\n    {\n        if (contribution.r < minimum_contribution\n            && contribution.g < minimum_contribution\n            && contribution.b < minimum_contribution)\n            // The light doesn't contribute enough\n            return false;\n        else\n            // The light contributes enough\n            return true;\n    }\n    else\n        // Minimum light contribution threshold disabled\n        return true;\n}\n\nHIPRT_DEVICE HIPRT_INLINE bool check_minimum_light_contribution(float minimum_contribution, float contribution)\n{\n    if (minimum_contribution > 0.0f)\n    {\n        if (contribution < minimum_contribution)\n            // The light doesn't contribute enough\n            return false;\n        else\n            // The light contributes enough\n            return true;\n    }\n    else\n        // Minimum light 
contribution threshold disabled\n        return true;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/LightSampling/Lights.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_LIGHTS_H\n#define DEVICE_LIGHTS_H\n\n#include \"Device/includes/BSDFs/MicrofacetRegularization.h\"\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/DI/FinalShading.h\"\n#include \"Device/includes/ReSTIR/ReGIR/FinalShading.h\"\n#include \"Device/includes/RIS/RIS.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/SanityCheck.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_one_light_no_MIS(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    if (!MaterialUtils::can_do_light_sampling(ray_payload.material))\n        return ColorRGB32F(0.0f);\n\n    LightSampleInformation light_sample = sample_one_emissive_triangle(render_data, \n        closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, \n        closest_hit_info.primitive_index, ray_payload,\n        random_number_generator);\n    if (light_sample.area_measure_pdf <= 0.0f)\n        // Can happen for very small triangles\n        return ColorRGB32F(0.0f);\n\n    float3 shadow_ray_origin = closest_hit_info.inter_point;\n    float3 shadow_ray_direction = light_sample.point_on_light - shadow_ray_origin;\n    float distance_to_light = hippt::length(shadow_ray_direction);\n    float3 shadow_ray_direction_normalized = shadow_ray_direction / 
distance_to_light;\n\n    hiprtRay shadow_ray;\n    shadow_ray.origin = shadow_ray_origin;\n    shadow_ray.direction = shadow_ray_direction_normalized;\n\n    ColorRGB32F light_source_radiance;\n    // abs() here to allow backfacing light sources\n    float dot_light_source = compute_cosine_term_at_light_source(light_sample.light_source_normal, -shadow_ray.direction);\n\n    if (dot_light_source > 0.0f)\n    {\n        NEEPlusPlusContext nee_plus_plus_context;\n        nee_plus_plus_context.point_on_light = light_sample.point_on_light;\n        nee_plus_plus_context.shaded_point = shadow_ray_origin;\n        bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n\n        if (!in_shadow)\n        {\n            float bsdf_pdf;\n\n            BSDFIncidentLightInfo incident_light_info = light_sample.incident_light_info;\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE && DirectLightSamplingBaseStrategy == LSS_BASE_REGIR\n            BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray.direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n#else\n            BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray.direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n#endif\n            ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n            if (bsdf_pdf != 0.0f)\n            {\n                // Conversion to 
solid angle from surface area measure\n                float light_sample_solid_angle_pdf = area_to_solid_angle_pdf(light_sample.area_measure_pdf, distance_to_light, dot_light_source);\n                if (light_sample_solid_angle_pdf > 0.0f)\n                {\n                    float cosine_term = hippt::abs(hippt::dot(closest_hit_info.shading_normal, shadow_ray.direction));\n                    light_source_radiance = light_sample.emission * cosine_term * bsdf_color / light_sample_solid_angle_pdf / nee_plus_plus_context.unoccluded_probability;\n\n                    // Just a CPU-only sanity check\n                    sanity_check</* CPUOnly */ true>(render_data, light_source_radiance, 0, 0);\n                }\n            }\n        }\n    }\n\n    return light_source_radiance;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_one_light_bsdf(const HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    float bsdf_sample_pdf;\n    float3 sampled_bsdf_direction;\n    BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n\n    BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n    ColorRGB32F bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, sampled_bsdf_direction, bsdf_sample_pdf, random_number_generator);\n\n    ColorRGB32F bsdf_radiance = ColorRGB32F(0.0f);\n    if (bsdf_sample_pdf > 0.0f)\n    {\n        hiprtRay new_ray;\n        new_ray.origin = closest_hit_info.inter_point;\n        new_ray.direction = sampled_bsdf_direction;\n\n        BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n        bool 
intersection_found = evaluate_bsdf_light_sample_ray(render_data, new_ray, 1.0e35f, shadow_light_ray_hit_info, closest_hit_info.primitive_index, ray_payload.bounce, random_number_generator);\n\n        // Checking that we did hit something and if we hit something,\n        // it needs to be emissive\n        if (intersection_found && !shadow_light_ray_hit_info.hit_emission.is_black() && compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -sampled_bsdf_direction) > 0.0f)\n        {\n            float cosine_term = hippt::abs(hippt::dot(closest_hit_info.shading_normal, sampled_bsdf_direction));\n            bsdf_radiance = bsdf_color * cosine_term * shadow_light_ray_hit_info.hit_emission / bsdf_sample_pdf;\n\n            // Just a CPU-only sanity check\n            sanity_check</* CPUOnly */ true>(render_data, bsdf_radiance, 0, 0);\n        }\n    }\n\n    return bsdf_radiance;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_one_light_MIS(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F light_source_radiance_mis;\n\n    if (MaterialUtils::can_do_light_sampling(ray_payload.material))\n    {\n        LightSampleInformation light_sample = sample_one_emissive_triangle(render_data,\n            closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, \n            closest_hit_info.primitive_index, ray_payload,\n            random_number_generator);\n\n        // Can happen for very small triangles that the PDF of the sampled triangle couldn't be computed\n        if (light_sample.area_measure_pdf > 0.0f)\n        {\n            float3 shadow_ray_direction = light_sample.point_on_light - closest_hit_info.inter_point;\n            float distance_to_light = hippt::length(shadow_ray_direction);\n            float3 shadow_ray_direction_normalized = 
shadow_ray_direction / distance_to_light;\n\n            hiprtRay shadow_ray;\n            shadow_ray.origin = closest_hit_info.inter_point;\n            shadow_ray.direction = shadow_ray_direction_normalized;\n\n            NEEPlusPlusContext nee_plus_plus_context;\n            nee_plus_plus_context.point_on_light = light_sample.point_on_light;\n            nee_plus_plus_context.shaded_point = shadow_ray.origin;\n            bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n\n            if (!in_shadow)\n            {\n                float bsdf_pdf;\n                BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n                BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray.direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n                ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n                if (bsdf_pdf > 0.0f)\n                {\n                    float cos_theta_at_light_source = compute_cosine_term_at_light_source(light_sample.light_source_normal, -shadow_ray.direction);\n\n                    // Preventing division by 0 in the conversion to solid angle here\n                    if (cos_theta_at_light_source > 1.0e-5f)\n                    {\n                        float light_sample_solid_angle_pdf = area_to_solid_angle_pdf(light_sample.area_measure_pdf, distance_to_light, cos_theta_at_light_source);\n                        float mis_weight = balance_heuristic(light_sample_solid_angle_pdf, bsdf_pdf);\n\n                        float cosine_term = 
hippt::abs(hippt::dot(closest_hit_info.shading_normal, shadow_ray.direction));\n                        light_source_radiance_mis = bsdf_color * cosine_term * light_sample.emission * mis_weight / light_sample_solid_angle_pdf / nee_plus_plus_context.unoccluded_probability;\n\n                        // Just a CPU-only sanity check\n                        sanity_check</* CPUOnly */ true>(render_data, light_source_radiance_mis, 0, 0);\n                    }\n                }\n            }\n        }\n    }\n\n    float bsdf_sample_pdf;\n    float3 sampled_bsdf_direction;\n    float3 bsdf_shadow_ray_origin = closest_hit_info.inter_point;\n    BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n    ColorRGB32F bsdf_radiance_mis;\n\n    unsigned int previous_seed = random_number_generator.m_state.seed;\n\n    random_number_generator.m_state.seed = previous_seed;\n    BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n    ColorRGB32F bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, sampled_bsdf_direction, bsdf_sample_pdf, random_number_generator);\n\n    if (bsdf_sample_pdf > 0.0f)\n    {\n        hiprtRay new_ray;\n        new_ray.origin = bsdf_shadow_ray_origin;\n        new_ray.direction = sampled_bsdf_direction;\n\n        BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n        bool intersection_found = evaluate_bsdf_light_sample_ray(render_data, new_ray, 1.0e35f, shadow_light_ray_hit_info, closest_hit_info.primitive_index, ray_payload.bounce, random_number_generator);\n\n        // Checking that we did hit something and if we hit something,\n        // it needs to be emissive\n        //\n        // We're also checking if the light is 
backfacing maybe with compute_cosine_term()\n        if (intersection_found && !shadow_light_ray_hit_info.hit_emission.is_black() && compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -sampled_bsdf_direction) > 0.0f)\n        {\n            float light_pdf_solid_angle = pdf_of_emissive_triangle_hit_solid_angle(render_data, shadow_light_ray_hit_info, sampled_bsdf_direction);\n            float mis_weight = balance_heuristic(bsdf_sample_pdf, light_pdf_solid_angle);\n\n            // Using abs here because we want the dot product to be positive.\n            // You may be thinking that if we're doing this, then we're not going to discard BSDF\n            // sampled direction that are below the surface (whereas we should discard them).\n            // That would be correct but bsdf_dispatcher_sample return a PDF == 0.0f if a bad\n            // direction was sampled and if the PDF is 0.0f, we never get to this line of code\n            // you're reading. If we are here, this is because we sampled a direction that is\n            // correct for the BSDF. 
Even if the direction is correct, the dot product may be\n            // negative in the case of refractions / total internal reflections and so in this case,\n            // we'll need to negative the dot product for it to be positive\n            float cosine_term = hippt::abs(hippt::dot(closest_hit_info.shading_normal, sampled_bsdf_direction));\n            bsdf_radiance_mis = bsdf_color * cosine_term * shadow_light_ray_hit_info.hit_emission * mis_weight / bsdf_sample_pdf;\n\n            // Just a CPU-only sanity check\n            sanity_check</* CPUOnly */ true>(render_data, bsdf_radiance_mis, 0, 0);\n        }\n    }\n\n    return light_source_radiance_mis + bsdf_radiance_mis;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_multiple_emissive_geometry(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F direct_light_contribution;\n\n    // Any of these light sampling strategy support sampling multiple lights\n    // per each shading point, effectively \"amortizing\" camera and bounce rays\n    for (int i = 0; i < DirectLightSamplingNEESampleCount; i++)\n    {\n#if DirectLightSamplingBaseStrategy == LSS_BASE_REGIR && DirectLightSamplingStrategy != LSS_BSDF\n        // ReGIR has its own special path to optimize things a bit.\n        // \n        // Also, BSDF sampling only can be handled by the usual path because then\n        // ReGIR isn't used\n        direct_light_contribution += sample_one_light_ReGIR(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n\n#else // Not ReGIR\n\n#if DirectLightSamplingStrategy == LSS_ONE_LIGHT\n        direct_light_contribution += sample_one_light_no_MIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif DirectLightSamplingStrategy == LSS_BSDF\n        direct_light_contribution += sample_one_light_bsdf(render_data, 
ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif DirectLightSamplingStrategy == LSS_MIS_LIGHT_BSDF\n        direct_light_contribution += sample_one_light_MIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif DirectLightSamplingStrategy == LSS_RIS_BSDF_AND_LIGHT\n        direct_light_contribution += sample_lights_RIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#endif\n\n#endif // #if ReGIR\n    }\n\n    return direct_light_contribution / DirectLightSamplingNEESampleCount;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_one_light_ReSTIR_DI(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, \n    const float3& view_direction, \n    Xorshift32Generator& random_number_generator, int2 pixel_coords)\n{\n    // ReSTIR DI doesn't support explicitely looping to sample\n    // multiple lights per shading point so that's why we don't\n    // have a loop for it\n\n    ColorRGB32F direct_light_contribution;\n    if (ray_payload.bounce == 0)\n        // Can only do ReSTIR DI on the first bounce\n        direct_light_contribution = sample_light_ReSTIR_DI(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator, pixel_coords);\n    else\n    {\n        // ReSTIR DI isn't used for the secondary/tertiary/... 
bounces\n        // so there we can take multiple light samples per path vertex\n        for (int i = 0; i < DirectLightSamplingNEESampleCount; i++)\n        {\n#if ReSTIR_DI_LaterBouncesSamplingStrategy == RESTIR_DI_LATER_BOUNCES_UNIFORM_ONE_LIGHT\n            direct_light_contribution += sample_one_light_no_MIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif ReSTIR_DI_LaterBouncesSamplingStrategy == RESTIR_DI_LATER_BOUNCES_BSDF\n            direct_light_contribution += sample_one_light_bsdf(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif ReSTIR_DI_LaterBouncesSamplingStrategy == RESTIR_DI_LATER_BOUNCES_MIS_LIGHT_BSDF\n            direct_light_contribution += sample_one_light_MIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#elif ReSTIR_DI_LaterBouncesSamplingStrategy == RESTIR_DI_LATER_BOUNCES_RIS_BSDF_AND_LIGHT\n            direct_light_contribution += sample_lights_RIS(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n#endif\n        }\n\n        direct_light_contribution /= DirectLightSamplingNEESampleCount;\n    }\n\n    return direct_light_contribution;\n}\n\n/**\n * Importance sample lights in the scene with NEE\n * \n * Just a random note for myself and maybe future readers\n * that are wondering the same:\n * \n * In the case where we shot a ray (camera ray or indirect bounce ray, doesn't matter)\n * and we hit an emissive material, we should still estimate NEE at that point. i.e. we\n * should also do NEE when standing on emissive materials because emissive materials can\n * reflect light just fine (unless they are blackbodies). \n * \n * Consider a glowing light bulb for example: this is just metal so hot that it glows\n * but because this is metal, it also reflects light.\n * \n * I think the better morale to remember is that the material being emissive doesn't matter at\n * all. 
As long as the material itself reflects light, then we should do NEE.\n */\nHIPRT_DEVICE ColorRGB32F sample_emissive_geometry(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, \n    const float3& view_direction, \n    Xorshift32Generator& random_number_generator, int2 pixel_coords)\n{\n    if (render_data.buffers.emissive_triangles_count == 0 \n        && !(render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP && DirectLightSamplingStrategy == LSS_RESTIR_DI))\n        // No emissive geometry in the scene to sample\n        // And we're not sampling the envmap with ReSTIR DI which means\n        // that we're not sampling anything so return black\n        return ColorRGB32F(0.0f);\n\n    if (render_data.bsdfs_data.white_furnace_mode && render_data.bsdfs_data.white_furnace_mode_turn_off_emissives)\n        return ColorRGB32F(0.0f);\n\n    ColorRGB32F material_self_textured_emission;\n    if (ray_payload.material.emissive_texture_used)\n        // If the material is using an emissive texture, we will add its emission to the NEE estimation\n        // because we're not importance sampling emissive textures so we're doing it the brute force\n        // way for now (there are some things about light warping I think to properly sample emissive\n        // textures but haven't read too much of that)\n        material_self_textured_emission = ray_payload.material.emission;\n\n    ColorRGB32F direct_light_contribution;\n#if DirectLightSamplingStrategy == LSS_NO_DIRECT_LIGHT_SAMPLING\n    direct_light_contribution = ColorRGB32F(0.0f);\n#else // A light sampling strategy is used\n\n#if DirectLightSamplingStrategy != LSS_RESTIR_DI\n    // A light sampling strategy that is not ReSTIR DI\n    // meaning that we can sample more than 1 light per\n    // path vertex\n    direct_light_contribution = sample_multiple_emissive_geometry(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n\n#elif 
DirectLightSamplingStrategy == LSS_RESTIR_DI\n    direct_light_contribution = sample_one_light_ReSTIR_DI(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator, pixel_coords);\n#endif\n#endif\n\n    return direct_light_contribution + material_self_textured_emission;\n}\n\nHIPRT_DEVICE ColorRGB32F clamp_direct_lighting_estimation(ColorRGB32F direct_lighting_contribution, float direct_contribution_clamp, int bounce)\n{\n    return clamp_light_contribution(direct_lighting_contribution, direct_contribution_clamp, bounce > 0);\n\n}\n\n/**\n * The x & y parameters are only used if using ReSTIR DI (they are for fetching the ReSTIR DI reservoir).\n * They can be ignored if not using ReSTIR DI\n */\nHIPRT_DEVICE ColorRGB32F estimate_direct_lighting(HIPRTRenderData& render_data, RayPayload& ray_payload, ColorRGB32F custom_ray_throughput, HitInfo& closest_hit_info,\n    float3 view_direction,\n    int x, int y,\n    Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F total_direct_lighting;\n\n    ColorRGB32F emissive_geometry_direct_contribution = sample_emissive_geometry(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator, make_int2(x, y));\n    ColorRGB32F envmap_direct_contribution = sample_environment_map(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n\n    // Clamping direct lighting\n    emissive_geometry_direct_contribution = clamp_light_contribution(emissive_geometry_direct_contribution, render_data.render_settings.direct_contribution_clamp, ray_payload.bounce == 0);\n    envmap_direct_contribution = clamp_light_contribution(envmap_direct_contribution, render_data.render_settings.envmap_contribution_clamp, ray_payload.bounce == 0);\n\n#if DirectLightSamplingStrategy == LSS_NO_DIRECT_LIGHT_SAMPLING // No direct light sampling\n    ColorRGB32F hit_emission = ray_payload.material.emission;\n    hit_emission = clamp_light_contribution(hit_emission, 
render_data.render_settings.indirect_contribution_clamp, ray_payload.bounce > 0);\n\n    total_direct_lighting += hit_emission * custom_ray_throughput;\n#else\n    if (ray_payload.bounce == 0)\n        // If we do have emissive geometry sampling, we only want to take\n        // it into account on the first bounce, otherwise we would be\n        // accounting for direct light sampling twice (bounce on emissive\n        // geometry + direct light sampling). Otherwise, we don't check for bounce == 0\n        total_direct_lighting += ray_payload.material.emission;\n\n    // Clamped indirect lighting \n    ColorRGB32F direct_lighting_contribution = (emissive_geometry_direct_contribution + envmap_direct_contribution) * custom_ray_throughput;\n\n    total_direct_lighting += direct_lighting_contribution;\n#endif\n\n    return total_direct_lighting;\n}\n\n/**\n * The x & y parameters are only used if using ReSTIR DI (they are for fetching the ReSTIR DI reservoir).\n * They can be ignored if not using ReSTIR DI\n */\nHIPRT_DEVICE ColorRGB32F estimate_direct_lighting_no_clamping(HIPRTRenderData& render_data, RayPayload& ray_payload, ColorRGB32F custom_ray_throughput, HitInfo& closest_hit_info,\n    float3 view_direction,\n    int x, int y, Xorshift32Generator& random_number_generator)\n{\n    return estimate_direct_lighting(render_data, ray_payload, custom_ray_throughput, closest_hit_info, view_direction, x, y, random_number_generator);\n}\n\n/**\n * The x & y parameters are only used if using ReSTIR DI (they are for fetching the ReSTIR DI reservoir).\n * They can be ignored if not using ReSTIR DI\n */\nHIPRT_DEVICE ColorRGB32F estimate_direct_lighting(HIPRTRenderData& render_data, RayPayload& ray_payload, HitInfo& closest_hit_info, \n    float3 view_direction,\n    int x, int y,\n    Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F unclamped_direct_lighting = estimate_direct_lighting(render_data, ray_payload, ray_payload.throughput, closest_hit_info, 
view_direction, x, y, random_number_generator);\n\n    return clamp_direct_lighting_estimation(unclamped_direct_lighting, render_data.render_settings.indirect_contribution_clamp, ray_payload.bounce);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/LightSampling/PDFConversion.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_PDF_CONVERSION_H\n#define DEVICE_INCLUDES_PDF_CONVERSION_H\n\n#include \"HostDeviceCommon/KernelOptions/DirectLightSamplingOptions.h\"\n#include \"HostDeviceCommon/Math.h\"\n\n /**\n  * Returns the cosine term of the given light source normal and the direction to the light source\n  * 'minus_direction_to_light' must be the direction *towards* the light but *negated*, such that\n  * dot(light_source_normal, minus_direction_to_light) > 0.0f (if the light isn't backfacing us)\n  *\n  * This function does the branching that allows backfacing lights or not\n  */\nHIPRT_INLINE HIPRT_DEVICE float compute_cosine_term_at_light_source(float3 light_source_normal, float3 minus_direction_to_light)\n{\n    // The cosine term is the dot product between the light source normal and the direction to the shading point\n\n#if DirectLightSamplingAllowBackfacingLights == KERNEL_OPTION_TRUE\n    // abs() to allow backfacing lights\n    return hippt::abs(hippt::dot(light_source_normal, minus_direction_to_light));\n#else\n    // clamping to 0 to disallow backfacing lights\n    return hippt::max(0.0f, hippt::dot(light_source_normal, minus_direction_to_light));\n#endif\n}\n\nHIPRT_INLINE HIPRT_HOST_DEVICE float area_to_solid_angle_pdf(float area_pdf, float distance, float cos_theta_at_light_source)\n{\n    if (cos_theta_at_light_source < 1.0e-8f)\n        return 0.0f;\n\n    return area_pdf * hippt::square(distance) / cos_theta_at_light_source;\n}\n\nHIPRT_INLINE HIPRT_HOST_DEVICE float solid_angle_to_area_pdf(float solid_angle_pdf, float distance, float cos_theta_at_light_source)\n{\n    if (cos_theta_at_light_source < 1.0e-8f)\n        return 0.0f;\n\n    return solid_angle_pdf / hippt::square(distance) * cos_theta_at_light_source;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Material.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_MATERIAL_H\n#define DEVICE_MATERIAL_H\n\n#include \"Device/includes/Texture.h\"\n\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/Material/MaterialUtils.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n#ifndef __KERNELCC__\n#include \"Image/Image.h\"\n#endif\n\ntemplate <typename T>\nHIPRT_DEVICE HIPRT_INLINE T get_material_property(const HIPRTRenderData& render_data, bool is_srgb, const float2& texcoords, int texture_index);\nHIPRT_DEVICE HIPRT_INLINE float2 get_metallic_roughness(const HIPRTRenderData& render_data, const float2& texcoords, int metallic_texture_index, int roughness_texture_index, int metallic_roughness_texture_index);\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F get_base_color(const HIPRTRenderData& render_data, float& out_alpha, const float2& texcoords, int base_color_texture_index);\n\nHIPRT_DEVICE HIPRT_INLINE float get_hit_base_color_alpha(const HIPRTRenderData& render_data, unsigned short int base_color_texture_index, int prim_id, float2 uv)\n{\n    if (base_color_texture_index == MaterialConstants::NO_TEXTURE)\n        // Quick exit if no texture\n        return 1.0f;\n\n    float2 texcoords = uv_interpolate(render_data.buffers.triangles_indices, prim_id, render_data.buffers.texcoords, uv);\n\n    // Getting the alpha for transparency check to see if we need to pass the ray through or not\n    float alpha;\n    get_base_color(render_data, alpha, texcoords, base_color_texture_index);\n\n    return alpha;\n}\n\nHIPRT_DEVICE HIPRT_INLINE float get_hit_base_color_alpha(const HIPRTRenderData& render_data, const DevicePackedTexturedMaterial& material, hiprtHit hit)\n{\n    return get_hit_base_color_alpha(render_data, material.get_base_color_texture_index(), hit.primID, hit.uv);\n}\n\nHIPRT_DEVICE HIPRT_INLINE float get_hit_base_color_alpha(const HIPRTRenderData& 
render_data, int prim_id, float2 uv)\n{\n    int material_index = render_data.buffers.material_indices[prim_id];\n    unsigned short int base_color_texture_index = render_data.buffers.materials_buffer.get_base_color_texture_index(material_index);\n\n    return get_hit_base_color_alpha(render_data, base_color_texture_index, prim_id, uv);\n}\n\nHIPRT_DEVICE HIPRT_INLINE float get_hit_base_color_alpha(const HIPRTRenderData& render_data, hiprtHit hit)\n{\n    int material_index = render_data.buffers.material_indices[hit.primID];\n    unsigned short int base_color_texture_index = render_data.buffers.materials_buffer.get_base_color_texture_index(material_index);\n\n    return get_hit_base_color_alpha(render_data, base_color_texture_index, hit.primID, hit.uv);\n}\n\nHIPRT_DEVICE HIPRT_INLINE DeviceUnpackedEffectiveMaterial get_intersection_material(const HIPRTRenderData& render_data, int material_index, float2 texcoords)\n{\n    DeviceUnpackedTexturedMaterial material = render_data.buffers.materials_buffer.read_partial_material(material_index).unpack();\n\n    float trash_alpha;\n    if (render_data.bsdfs_data.white_furnace_mode)\n        material.base_color = ColorRGB32F(1.0f);\n    else\n    {\n        if (material.base_color_texture_index != MaterialConstants::NO_TEXTURE)\n            material.base_color = get_base_color(render_data, trash_alpha, texcoords, material.base_color_texture_index);\n    }\n\n    // Reading some parameters from the textures\n    float2 roughness_metallic = get_metallic_roughness(render_data, texcoords, material.metallic_texture_index, material.roughness_texture_index, material.roughness_metallic_texture_index);\n    if (material.roughness_metallic_texture_index != MaterialConstants::NO_TEXTURE)\n    {\n        material.roughness = roughness_metallic.x;\n        material.metallic = roughness_metallic.y;\n    }\n    else\n    {\n        if (material.roughness_texture_index != MaterialConstants::NO_TEXTURE)\n            material.roughness = 
roughness_metallic.x;\n\n        if (material.metallic_texture_index != MaterialConstants::NO_TEXTURE)\n            material.metallic = roughness_metallic.y;\n\n        // If not reading from a texture, setting the roughness into the roughness_metallic\n        // variable because the roughness is going to be used later\n        roughness_metallic.x = material.roughness;\n    }\n\n    float anisotropy = get_material_property<float>(render_data, false, texcoords, material.anisotropic_texture_index);\n    if (material.anisotropic_texture_index != MaterialConstants::NO_TEXTURE)\n        material.anisotropy = anisotropy;\n    \n    float specular = get_material_property<float>(render_data, false, texcoords, material.specular_texture_index);\n    if (material.specular_texture_index != MaterialConstants::NO_TEXTURE)\n        material.specular = specular;\n\n    float coat = get_material_property<float>(render_data, false, texcoords, material.coat_texture_index);\n    if (material.coat_texture_index != MaterialConstants::NO_TEXTURE)\n        material.coat = coat;\n    else\n        coat = material.coat;\n\n    float sheen = get_material_property<float>(render_data, false, texcoords, material.sheen_texture_index);\n    if (material.sheen_texture_index != MaterialConstants::NO_TEXTURE)\n        material.sheen = sheen;\n\n    float specular_transmission = get_material_property<float>(render_data, false, texcoords, material.specular_transmission_texture_index);\n    if (material.specular_transmission_texture_index != MaterialConstants::NO_TEXTURE)\n        material.specular_transmission = specular_transmission;\n\n    ColorRGB32F emission = get_material_property<ColorRGB32F>(render_data, false, texcoords, material.emission_texture_index);\n    if (material.emission_texture_index == MaterialConstants::NO_TEXTURE || material.emission_texture_index == MaterialConstants::CONSTANT_EMISSIVE_TEXTURE)\n        emission = material.emission;\n\n    DeviceUnpackedEffectiveMaterial 
unpacked_material(material);\n\n    unpacked_material.emissive_texture_used = material.emission_texture_index != MaterialConstants::NO_TEXTURE;\n    unpacked_material.emission = emission;\n    // Roughening of the base roughness and second metallic roughness based\n    // on the coat roughness. This should be precomputed instead of being done here\n    //\n    // Reference: [OpenPBR Surface 2024 Specification] https://academysoftwarefoundation.github.io/OpenPBR/#model/coat/roughening\n    float coat_roughening = unpacked_material.coat_roughening;\n    if (coat > 0.0f && coat_roughening > 0.0f)\n    {\n        float base_roughness = roughness_metallic.x;\n        float coat_roughness = unpacked_material.coat_roughness;\n\n        // Roughening of the base roughness of the material based on the coat roughness\n        float target_base_roughness = hippt::pow_1_4(hippt::min(1.0f, hippt::pow_4(base_roughness) + 2.0f * hippt::pow_4(coat_roughness)));\n        float roughened_base_roughness = hippt::lerp(base_roughness, target_base_roughness, coat);\n        unpacked_material.roughness = hippt::lerp(base_roughness, roughened_base_roughness, coat_roughening);\n\n        if (unpacked_material.second_roughness_weight > 0.0f)\n        {\n            // Roughening of the second metallic roughness based on the coat roughness\n\n            float second_roughness = unpacked_material.second_roughness;\n            float target_second_metal_roughness = hippt::pow_1_4(hippt::min(1.0f, hippt::pow_4(second_roughness) + 2.0f * hippt::pow_4(coat_roughness)));\n            float roughened_second_metal_roughness = hippt::lerp(second_roughness, target_second_metal_roughness, coat);\n            unpacked_material.second_roughness = hippt::lerp(second_roughness, roughened_second_metal_roughness, coat_roughening);\n        }\n    }\n\n    return unpacked_material;\n}\n\n/**\n * The float2 returned is (roughness, metallic)\n */\nHIPRT_DEVICE HIPRT_INLINE float2 get_metallic_roughness(const 
HIPRTRenderData& render_data, const float2& texcoords, int metallic_texture_index, int roughness_texture_index, int metallic_roughness_texture_index)\n{\n    float2 out;\n\n    if (metallic_roughness_texture_index != MaterialConstants::NO_TEXTURE)\n    {\n        ColorRGB32F rgb = sample_texture_rgb_8bits(render_data.buffers.material_textures, metallic_roughness_texture_index, false, texcoords);\n\n        // Not converting to linear here because material properties (roughness and metallic) here are assumed to be linear already\n        out.x = rgb.g;\n        out.y = rgb.b;\n    }\n    else\n    {\n        out.x = get_material_property<float>(render_data, false, texcoords, roughness_texture_index);\n        out.y = get_material_property<float>(render_data, false, texcoords, metallic_texture_index);\n    }\n\n    return out;\n}\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F get_base_color(const HIPRTRenderData& render_data, float& out_alpha, const float2& texcoords, int base_color_texture_index)\n{\n    out_alpha = 1.0f;\n    ColorRGBA32F rgba = get_material_property<ColorRGBA32F>(render_data, true, texcoords, base_color_texture_index);\n    if (base_color_texture_index != MaterialConstants::NO_TEXTURE)\n    {\n        ColorRGB32F base_color = ColorRGB32F(rgba.r, rgba.g, rgba.b);\n        out_alpha = rgba.a;\n\n        return base_color;\n    }\n\n    return ColorRGB32F();\n}\n\ntemplate <typename T>\nHIPRT_DEVICE HIPRT_INLINE T read_data(const ColorRGBA32F& rgba) {}\n\ntemplate<>\nHIPRT_DEVICE HIPRT_INLINE ColorRGBA32F read_data<ColorRGBA32F>(const ColorRGBA32F& rgba)\n{\n    return rgba;\n}\n\ntemplate<>\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F read_data<ColorRGB32F>(const ColorRGBA32F& rgba)\n{\n    return ColorRGB32F(rgba.r, rgba.g, rgba.b);\n}\n\ntemplate<>\nHIPRT_DEVICE HIPRT_INLINE float read_data<float>(const ColorRGBA32F& rgba)\n{\n    return rgba.r;\n}\n\ntemplate <typename T>\nHIPRT_DEVICE HIPRT_INLINE T get_material_property(const HIPRTRenderData& render_data, 
bool is_srgb, const float2& texcoords, int texture_index)\n{\n    if (texture_index == MaterialConstants::NO_TEXTURE || texture_index == MaterialConstants::CONSTANT_EMISSIVE_TEXTURE)\n        return T();\n\n    ColorRGBA32F rgba = sample_texture_rgba(render_data.buffers.material_textures, texture_index, is_srgb, texcoords);\n    return read_data<T>(rgba);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/NEE++/NEE++.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_NEE_PLUS_PLUS\n#define DEVICE_INCLUDES_NEE_PLUS_PLUS\n\n#include \"Device/includes/HashGrid.h\"\n#include \"Device/includes/HashGridHash.h\"\n\n#include \"HostDeviceCommon/Math.h\"\n\n/**\n * Context passed when tracing shadow rays \n */\nstruct NEEPlusPlusContext\n{\n\tfloat3 shaded_point;\n\tfloat3 point_on_light;\n\n\t// After passing this context to a call to 'evaluate_shadow_ray_nee_plus_plus',\n\t// this member will be filled with the probability that the points 'shaded_point'\n\t// and 'point_on_light' are mutually visible.\n\t//\n\t// If the call to 'evaluate_shadow_ray_nee_plus_plus' returns 'false' i.e. that the\n\t// points are mutually visibile, you will need to account this\n\t// 'unoccluded_probability' in the PDF of the light you sampled i.e. multiply\n\t// your PDF by this 'unoccluded_probability' to guarantee unbiasedness\n\tfloat unoccluded_probability = 1.0f;\n\t// Set this flag to true if this context should be used\n\t// for testing visibility probability between 'shaded_point' and the\n\t// envmap.\n\t//\n\t// ----- WARNING:\n\t// 'point_on_light' should be the normalized direction towards the envmap if this is set to true\n\tbool envmap = false;\n};\n\nstruct NEEPlusPlusEntry\n{\n\tAtomicType<unsigned int>* total_unoccluded_rays = nullptr;\n\tAtomicType<unsigned int>* total_num_rays = nullptr;\n\n\tAtomicType<unsigned int>* checksum_buffer = nullptr;\n};\n\n/**\n * Structure that contains the data for the implementation of NEE++.\n * \n * Reference:\n * [1] [Next Event Estimation++: Visibility Mapping for Efficient Light Transport Simulation]\n */\nstruct NEEPlusPlusDevice\n{\n\t// If true, the next camera rays kernel call will reset the visibility map\n\tbool m_reset_visibility_map = false;\n\t// If true, the grid visibility will be updated this frame (new visibility values 
will be accumulated)\n\tbool m_update_visibility_map = true;\n\t// Whether or not to do russian roulette with NEE++ on emissive lights\n\tbool m_enable_nee_plus_plus_RR_for_emissives = true;\n\t// Whether or not to do russian roulette with NEE++ on envmap samples\n\tbool m_enable_nee_plus_plus_RR_for_envmap = false;\n\n\tunsigned int m_total_number_of_cells = 0;\n\tfloat m_grid_cell_min_size = 0.25f;\n\tfloat m_grid_cell_target_projected_size = 25.0f;\n\n\t// After how many samples to stop updating the visibility map\n\t// (because it's probably converged enough)\n\tint m_stop_update_samples = 256;\n\n\tenum BufferNames : unsigned int\n\t{\n\t\tVISIBILITY_MAP_UNOCCLUDED_COUNT = 0,\n\t\tVISIBILITY_MAP_TOTAL_COUNT = 1,\n\t};\n\n\t// Linear buffer that is a packing of 4 buffers:\n\t// \n\t// - 1 buffer that stores the number of rays that were\n\t//\t\tcomputed as non-occluded from voxel to voxel in the scene.\n\t//\n\t//\t\tFor example, if 16 rays were shot from one voxel to another\n\t//\t\tand 7 of these rays were found to be unoccluded, then the corresponding\n\t//\t\tentry in the map will contain the value 7\n\t//\n\t//\t\tBecause the visibility map is symmetrical, this is a linear buffer that contains\n\t//\t\tonly half of the visibility matrix\n\t//\n\t//\t\tFor the indexing logic, (0, 0) is in the top left corner of the matrix\n\t//\n\t// - 1 buffer that is the same the same as the previous one but stores how many rays\n\t//\t\tin total were traced in total from one voxel to another, not just the unoccluded ones. 
\n\t//\t\tIn the example from above, this would contain the value 16.\n\t//\n\t//\t\tFor the indexing logic, (0, 0) is in the top left corner of the matrix\n\t//\n\t// - 2 buffers used for accumulation during the rendering process\n\t//\t\tThese two buffers are used for accumulation of the visibility information during the rendering\n\t//\t\tFor example, if we trace a shadow ray between voxel A and voxel B and that this shadow ray is\n\t//\t\toccluded, we're going to have to update the visibility map with information.\n\t// \n\t//\t\tHowever, we cannot just simply update the visibility map (i.e. the 2 first buffers)\n\t//\t\tduring the rendering because this would\tlead to concurrency issues where the map is \n\t//\t\tbeing updated while also being read by other threads.\n\t// \n\t//\t\tThe race condition is fine, what's not fine is that this will vary the estimate of the occlusion probability\n\t//\t\tfrom voxel A to voxel B and I found that this resulted in bias / non-determinism because the order in which\n\t//\t\tthe threads update the map now influences how the other threads are going to read the map\n\t//\n\t//\t\tSo instead we have some additional buffers here to accumulate separately and then this buffers are copied\n\t//\t\tevery N frames (or N seconds) to the 'true' visibility map used during rendering\n\t//\n\t// Each one these 4 buffers are of type unsigned chars, packed into 1 unsigned ints.\n\t// \n\t// The data is stored such that the first unsigned int contains the 4 buffers at index 0 of the matrix\n\t// The second unsigned int contains the 4 buffers at index 1\n\t// ...\n\tNEEPlusPlusEntry m_entries_buffer;\n\n\t// Counter that keep tracks of how many cells are currently used in the hash grid\n\tAtomicType<unsigned int>* m_total_cells_alive_count = nullptr;\n\n\t// If a voxel-to-voxel unocclusion probability is higher than that, the voxel will be considered unoccluded\n\t// and so a shadow ray will be traced. 
This is to avoid trusting voxel that have a low probability of\n\t// being unoccluded\n\t//\n\t// 0.0f basically disables NEE++ as any entry of the visibility map will require a shadow ray\n\tfloat m_confidence_threshold = 0.025f;\n\tfloat m_minimum_unoccluded_proba = 0.0f;\n\n\t// Whether or not to count the number of shadow rays actually traced vs. the number of shadow\n\t// queries made. This is used in 'evaluate_shadow_ray_nee_plus_plus()'\n\tbool do_update_shadow_rays_traced_statistics = true;\n\n\tAtomicType<unsigned long long int>* m_total_shadow_ray_queries = nullptr;\n\tAtomicType<unsigned long long int>* m_shadow_rays_actually_traced = nullptr;\n\n\tHIPRT_HOST_DEVICE void accumulate_visibility(bool visible, unsigned int hash_grid_index)\n\t{\n\t\tif (hash_grid_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t// One of the two points was outside the scene, cannot cache this\n\t\t\treturn;\n\t\t\n\t\tif (read_buffer<BufferNames::VISIBILITY_MAP_TOTAL_COUNT>(hash_grid_index) >= 255)\n\t\t\treturn;\n\n\t\tif (visible)\n\t\t\tincrement_buffer<BufferNames::VISIBILITY_MAP_UNOCCLUDED_COUNT>(hash_grid_index, 1);\n\t\tunsigned int total_count_before = increment_buffer<BufferNames::VISIBILITY_MAP_TOTAL_COUNT>(hash_grid_index, 1);\n\n\t\tif (total_count_before == 0)\n\t\t\t// If we just inserted a new cell in the hash grid, that's one more cell alive\n\t\t\thippt::atomic_fetch_add(m_total_cells_alive_count, 1u);\n\t}\n\n\t/**\n\t * Updates the visibility map with one additional entry: whether or not the two given world points are visible\n\t */\n\tHIPRT_HOST_DEVICE void accumulate_visibility(const NEEPlusPlusContext& context, HIPRTCamera& current_camera, bool visible)\n\t{\n\t\treturn accumulate_visibility(visible, get_visibility_map_index<true>(context, current_camera));\n\t}\n\n\t/**\n\t * Returns the estimated probability that a ray between the two given world points \n\t * is going to be unoccluded (i.e. 
the two points are mutually visible)\n\t * \n\t * Returns the index in the visibility matrix of the voxel-to-voxel correspondance of the\n\t * two given points. This value can then be passed as argument to 'accumulate_visibility'\n\t * to save a little bit of computations (otherwise, 'accumulate_visibility' would have recomputed\n\t * that value on its own even though the world points given may be the same and thus, the matrix\n\t * index is the same)\n\t */\n\tHIPRT_HOST_DEVICE float estimate_visibility_probability(const NEEPlusPlusContext& context, const HIPRTCamera& current_camera, unsigned int& out_hash_grid_index, unsigned int& out_cell_total_accumulation_count) const\n\t{\n\t\tout_hash_grid_index = get_visibility_map_index<true>(context, current_camera);\n\t\tif (out_hash_grid_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t// One of the two points was outside the scene, cannot read the cache for this\n\t\t\t// \n\t \t\t// Returning 1.0f indicating that the two points are not occluded such that the caller\n\t\t\t// tests for a shadow ray\n\t\t\treturn 1.0f;\n\n\t\tout_cell_total_accumulation_count = read_buffer<BufferNames::VISIBILITY_MAP_TOTAL_COUNT>(out_hash_grid_index);\n\t\tif (out_cell_total_accumulation_count == 0)\n\t\t\t// No information for these two points\n\t\t\t// \n\t\t\t// Returning 1.0f indicating that the two points are not occluded such that the caller\n\t\t\t// tests for a shadow ray\n\t\t\treturn 1.0f;\n\t\telse\n\t\t{\n\t\t\tunsigned int unoccluded_count = read_buffer<BufferNames::VISIBILITY_MAP_UNOCCLUDED_COUNT>(out_hash_grid_index);\n\t\t\t\n\t\t\tfloat unoccluded_proba = unoccluded_count / static_cast<float>(out_cell_total_accumulation_count);\n\t\t\tif (unoccluded_proba >= m_confidence_threshold)\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\treturn hippt::max(m_minimum_unoccluded_proba, unoccluded_proba);\n\t\t}\n\t}\n\n\t/**\n\t * Returns the estimated probability that a ray between the two given world points\n\t * is going 
to be unoccluded (i.e. the two points are mutually visible)\n\t */\n\tHIPRT_HOST_DEVICE float estimate_visibility_probability(const NEEPlusPlusContext& context, const HIPRTCamera& current_camera, unsigned int& out_cell_total_accumulation_count) const\n\t{\n\t\tunsigned int trash_matrix_index;\n\n\t\treturn estimate_visibility_probability(context, current_camera, trash_matrix_index, out_cell_total_accumulation_count);\n\t}\n\n\tHIPRT_HOST_DEVICE float estimate_visibility_probability(const NEEPlusPlusContext& context, const HIPRTCamera& current_camera) const\n\t{\n\t\tunsigned int trash_matrix_index;\n\t\tunsigned int trash_accumulation_count;\n\n\t\treturn estimate_visibility_probability(context, current_camera, trash_matrix_index, trash_accumulation_count);\n\t}\n\n\tHIPRT_HOST_DEVICE unsigned int hash_context(const NEEPlusPlusContext& context, const HIPRTCamera& current_camera, unsigned int& out_checksum) const\n\t{\n\t\tfloat3 second_point = context.envmap ? (context.shaded_point + context.point_on_light * 1.0e20f) : context.point_on_light;\n\n\t\treturn hash_double_position_camera(m_total_number_of_cells, context.shaded_point, second_point, current_camera, m_grid_cell_target_projected_size, m_grid_cell_min_size, out_checksum);\n\t}\n\n\ttemplate <bool isInsertion = false>\n\tHIPRT_HOST_DEVICE unsigned int get_visibility_map_index(const NEEPlusPlusContext& context, const HIPRTCamera& current_camera) const\n\t{\n\t\tunsigned int checksum;\n\t\tunsigned int hash_grid_index = hash_context(context, current_camera, checksum);\n\t\tif (!HashGrid::resolve_collision<NEEPlusPlus_LinearProbingSteps, isInsertion>(m_entries_buffer.checksum_buffer, m_total_number_of_cells, hash_grid_index, checksum))\n\t\t{\n\t\t\treturn HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\t\t}\n\n\t\treturn hash_grid_index;\n\t}\n\n\t// TODO compare with the alpha learning rate and the ground truth to see the behavior of a single float buffer\n\t// TODO see if capping at 255 / 65535 is 
enough\nprivate:\n\t/**\n\t * Returns the value packed in the buffer at the given visibility matrix index and with the given\n\t * buffer name from the BufferNames enum\n\t */\n\ttemplate <unsigned int bufferName>\n\tHIPRT_HOST_DEVICE unsigned int read_buffer(unsigned int hash_grid_index) const\n\t{\n\t\tif constexpr (bufferName == 0)\n\t\t\treturn m_entries_buffer.total_unoccluded_rays[hash_grid_index];\n\t\telse if constexpr (bufferName == 1)\n\t\t\treturn m_entries_buffer.total_num_rays[hash_grid_index];\n\t}\n\n\t/**\n\t * Increments the packed value in the packed buffer 'bufferName' at the given matrix index\n\t * \n\t * There is no protection against overflows in this function\n\t */\n\ttemplate <unsigned int bufferName>\n\tHIPRT_HOST_DEVICE unsigned int increment_buffer(unsigned int hash_grid_index, unsigned int value)\n\t{\n\t\tif constexpr (bufferName == 0)\n\t\t\treturn hippt::atomic_fetch_add(&m_entries_buffer.total_unoccluded_rays[hash_grid_index], value);\n\t\tif constexpr (bufferName == 1)\n\t\t\treturn hippt::atomic_fetch_add(&m_entries_buffer.total_num_rays[hash_grid_index], value);\n\t}\n\n\t/**\n\t * Sets the value in one of the packed buffer\n\t * \n\t * WARNING:\n\t * This function is non-atomic\n\t */\n\ttemplate <unsigned int bufferName>\n\tHIPRT_HOST_DEVICE void set_buffer(unsigned int hash_grid_index, unsigned int value)\n\t{\n\t\tif constexpr (bufferName == 0)\n\t\t\tm_entries_buffer.total_unoccluded_rays[hash_grid_index] = value;\n\t\tif constexpr (bufferName == 1)\n\t\t\tm_entries_buffer.total_num_rays[hash_grid_index] = value;\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/NestedDielectrics.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_NESTED_DIELECTRICS_H\n#define DEVICE_NESTED_DIELECTRICS_H\n\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n\n#include <hiprt/hiprt_common.h>\n\n#ifdef __KERNELCC__\n// On the GPU, the nested dielectrics stack is allocated in shared memory.\n// This means that all the entries of the nested dielectrics stacks are in shared memory.\n//\n// For example, for thread blocks of 64 and a NestedDielectricStackSize of 3, this gives us\n// a shared memory array of 3*64 = 192 entries.\n// \n// We then need a mapping that \"redirects\" each thread to its proper entry in that 192-long array.\n// \n// That's what this macro does, it takes an index in the stack as parameter (so 0, 1 or 2 for a NestedDielectricStackSize of 3)\n// and maps it to the index to use in the shared memory array by using the threadIdx.\n// \n// Note that the mapping is written to minimize shared memory bank conflicts\n#define NESTED_DIELECTRICS_STACK_INDEX_SHIFT(x) (x)\n\n#else\n// This macro is used to offset the index used to index the priority stack.\n// On the CPU, there is nothing to do, just use the given index, there is really nothing\n// special. 
The special case is for the GPU, explained above the GPU macro definition\n#define NESTED_DIELECTRICS_STACK_INDEX_SHIFT(x) (x)\n#endif\n\n/**\n * Reference:\n *\n * [1] [Simple Nested Dielectrics in Ray Traced Images, Schmidt, 2002]\n */\nstruct StackPriorityEntry\n{\n\t// How many bits for encoding the packed priority\n\t// and its shift to locate the bits in the packed 32bits integer\n\tstatic constexpr unsigned int PRIORITY_BIT_MASK = 0b1111;\n\tstatic constexpr unsigned int PRIORITY_BIT_SHIFT = 0;\n\tstatic constexpr unsigned int PRIORITY_MAXIMUM = PRIORITY_BIT_MASK;\n\t// How many bits for encoding the topmost flag\n\t// and its shift to locate the bits in the packed 32bits integer\n\tstatic constexpr unsigned int TOPMOST_BIT_MASK = 0b1;\n\tstatic constexpr unsigned int TOPMOST_BIT_SHIFT = PRIORITY_BIT_SHIFT + 4;\n\t// How many bits for encoding the odd_parity flag\n\t// and its shift to locate the bits in the packed 32bits integer\n\tstatic constexpr unsigned int ODD_PARTIY_BIT_MASK = 0b1;\n\tstatic constexpr unsigned int ODD_PARTIY_BIT_SHIFT = TOPMOST_BIT_SHIFT + 1;\n\n\t// How many bits for encoding the material_index flag\n\t// and its shift to locate the bits in the packed 32bits integer\n\t// This is the rest of the bits after we've added the other flags\n\tstatic constexpr unsigned int COMBINED_OTHER_FLAGS = (PRIORITY_BIT_MASK << PRIORITY_BIT_SHIFT) | (TOPMOST_BIT_MASK << TOPMOST_BIT_SHIFT) | (ODD_PARTIY_BIT_MASK << ODD_PARTIY_BIT_SHIFT);\n\tstatic constexpr unsigned int MATERIAL_INDEX_BIT_SHIFT = ODD_PARTIY_BIT_SHIFT + 1;\n\tstatic constexpr unsigned int MATERIAL_INDEX_BIT_MASK = (0xffffffff & (~COMBINED_OTHER_FLAGS)) >> MATERIAL_INDEX_BIT_SHIFT;\n\t// This 'MATERIAL_INDEX_MAXIMUM' is just an alias basically\n\tstatic constexpr unsigned int MATERIAL_INDEX_MAXIMUM = MATERIAL_INDEX_BIT_MASK;\n\n\tHIPRT_HOST_DEVICE void set_priority(int priority)\n\t{\n\t\t// Clear\n\t\tpacked_data &= ~(PRIORITY_BIT_MASK << PRIORITY_BIT_SHIFT);\n\t\t// 
Set\n\t\tpacked_data |= (priority & PRIORITY_BIT_MASK) << PRIORITY_BIT_SHIFT;\n\t}\n\n\tHIPRT_HOST_DEVICE void set_topmost(bool topmost)\n\t{\n\t\t// Clear\n\t\tpacked_data &= ~(TOPMOST_BIT_MASK << TOPMOST_BIT_SHIFT);\n\t\t// Set\n\t\tpacked_data |= (topmost == true) << TOPMOST_BIT_SHIFT;\n\t}\n\n\tHIPRT_HOST_DEVICE void set_odd_parity(bool odd_parity)\n\t{\n\t\t// Clear\n\t\tpacked_data &= ~(ODD_PARTIY_BIT_MASK << ODD_PARTIY_BIT_SHIFT);\n\t\t// Set\n\t\tpacked_data |= (odd_parity == true) << ODD_PARTIY_BIT_SHIFT;\n\t}\n\n\tHIPRT_HOST_DEVICE void set_material_index(int material_index)\n\t{\n\t\t// Clear\n\t\tpacked_data &= ~(MATERIAL_INDEX_BIT_MASK << MATERIAL_INDEX_BIT_SHIFT);\n\t\t// Set\n\t\tpacked_data |= (material_index & MATERIAL_INDEX_BIT_MASK) << MATERIAL_INDEX_BIT_SHIFT;\n\t}\n\n\tHIPRT_HOST_DEVICE int get_priority() const { return (packed_data >> PRIORITY_BIT_SHIFT) & PRIORITY_BIT_MASK; }\n\tHIPRT_HOST_DEVICE bool get_topmost() const { return (packed_data >> TOPMOST_BIT_SHIFT) & TOPMOST_BIT_MASK; }\n\tHIPRT_HOST_DEVICE bool get_odd_parity() const { return (packed_data >> ODD_PARTIY_BIT_SHIFT) & ODD_PARTIY_BIT_MASK; }\n\tHIPRT_HOST_DEVICE int get_material_index() const { return (packed_data >> MATERIAL_INDEX_BIT_SHIFT) & MATERIAL_INDEX_BIT_MASK; }\n\n\t// Packed data contains:\n\t//\t- the priority of the stack entry\n\t//\t- whether or not this is the topmost entry for that material in the stack\n\t//\t- An odd_parity flag\n\t//\t- The material index\n\t// \n\t// We get the bits:\n\t// \n\t// **** *** material index* **** **OT PRIO\n\t// \n\t// With :\n\t// - O the odd_parity flag\n\t// - T the topmost flag\n\t// - PRIO the dielectric priority \n\tunsigned int packed_data;\n};\n\nstruct NestedDielectricsInteriorStack\n{\n\t/**\n\t * Pushes a new material index onto the stack\n\t * \n\t * Returns true if that intersection should be skipped (because we are currently in a material with \n\t * higher priority than the material we just intersected)\n\t * \n\t 
* Returns false if that intersection should not be skipped\n\t */\n\tHIPRT_HOST_DEVICE bool push(int& out_incident_material_index, int& out_outgoing_material_index, bool& out_inside_material, int material_index, int material_priority)\n\t{\n\t\tif (stack_position == NestedDielectricsStackSize - 1)\n\t\t\t// The stack is already at the maximum\n\t\t\treturn false;\n\t\t\t\n\t\t// Index of the material we last entered before intersecting the\n\t\t// material we're currently inserting in the stack\n\t\tint last_entered_mat_index = 0;\n\t\tfor (last_entered_mat_index = stack_position; last_entered_mat_index >= 0; last_entered_mat_index--)\n\t\t\t// The three conditions in order are:\n\t\t\t// \t- We found a materal in the stack that is not the material that we're currently intersecting\n\t\t\t//\t- The entry of that material in the stack is the topmost (the last entry of its material kind)\n\t\t\t//\t- The entry of that material in the stack is odd_parity = we've entered that material but haven't left it yet\n\t\t\t//\n\t\t\t//\t= the last entered material\n\t\t\tif (stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_material_index() != material_index \n\t\t\t && stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_topmost()\n\t\t\t && stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_odd_parity())\n\t\t\t\tbreak;\n\n\t\t// Parity of the material we're inserting in the stack\n\t\tbool odd_parity = true;\n\t\t// Index in the stack of the previous material that is the same as\n\t\t// the one we're trying to insert in the stack.\n\t\tint previous_same_mat_index;\n\t\tfor (previous_same_mat_index = stack_position; previous_same_mat_index >= 0; previous_same_mat_index--)\n\t\t{\n\t\t\tint stack_index = NESTED_DIELECTRICS_STACK_INDEX_SHIFT(previous_same_mat_index);\n\t\t\tif (stack_entries[stack_index].get_material_index() == material_index)\n\t\t\t{\n\t\t\t\t// The previous stack entry of 
the same material is not the topmost anymore\n\t\t\t\tstack_entries[stack_index].set_topmost(false);\n\t\t\t\t// The current parity is the inverse of the previous one\n\t\t\t\todd_parity = !stack_entries[stack_index].get_odd_parity();\n\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\t\n\t\tout_inside_material = !odd_parity;\n\n\t\t// Inserting the material in the stack\n\t\tif (stack_position < NestedDielectricsStackSize - 1)\n\t\t\tstack_position++;\n\n\t\tint new_stack_index = NESTED_DIELECTRICS_STACK_INDEX_SHIFT(stack_position);\n\t\tstack_entries[new_stack_index].set_material_index(material_index);\n\t\tstack_entries[new_stack_index].set_odd_parity(odd_parity);\n\t\tstack_entries[new_stack_index].set_topmost(true);\n\t\tstack_entries[new_stack_index].set_priority(material_priority);\n\n\t\tif (material_priority < stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_priority())\n\t\t{\n\t\t\t// Skipping the boundary because the intersected material has a\n\t\t\t// lower priority than the material we're currently in\n\t\t\treturn true;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (odd_parity)\n\t\t\t{\n\t\t\t\t// We are entering the material\n\t\t\t\tout_incident_material_index = stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_material_index();\n\t\t\t\tout_outgoing_material_index = material_index;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\t// Exiting material\n\t\t\t\tout_incident_material_index = material_index;\n\t\t\t\tout_outgoing_material_index = stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(last_entered_mat_index)].get_material_index();\n\t\t\t}\n\n\t\t\t// Not skipping the boundary\n\t\t\treturn false;\n\t\t}\n\t}\n\n\tHIPRT_HOST_DEVICE void pop(const bool inside_material)\n\t{\n\t\tint stack_top_mat_index = stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(stack_position)].get_material_index();\n\t\tif (stack_position > 0)\n\t\t\t// Checking that we have room to pop.\n\t\t\t// For a very small stack (size of 2) that 
overflown \n\t\t\t// (we couldn't push all the material we needed to because of \n\t\t\t// stack size constraint), it can happen that the stack position\n\t\t\t// at this point is already 0 and we cannot pop.\n\t\t\tstack_position--;\n\n\t\tif (inside_material)\n\t\t{\n\t\t\tint previous_same_mat_index;\n\t\t\tfor (previous_same_mat_index = stack_position; previous_same_mat_index >= 0; previous_same_mat_index--)\n\t\t\t\tif (stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(previous_same_mat_index)].get_material_index() == stack_top_mat_index)\n\t\t\t\t\tbreak;\n\n\t\t\tif (previous_same_mat_index >= 0)\n\t\t\t\tfor (int i = previous_same_mat_index + 1; i <= stack_position; i++)\n\t\t\t\t\tstack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i - 1)] = stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)];\n\n\t\t\t// For very small stacks (2 for example), we may not be able to pop twice\n\t\t\t// at all so we check the position on the stack first\n\t\t\tif (stack_position > 0)\n\t\t\t\tstack_position--;\n\t\t}\n\n\t\tfor (int i = stack_position; i >= 0; i--)\n\t\t{\n\t\t\tif (stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].get_material_index() == stack_top_mat_index)\n\t\t\t{\n\t\t\t\tstack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].set_topmost(true);\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\n\t// We only need all of this if the stack size is actually > 0,\n\t// otherwise, we're just not going to do the nested dielectrics handling at all\n\n\tStackPriorityEntry stack_entries[NestedDielectricsStackSize];\n\n\tstatic constexpr unsigned int MAX_MATERIAL_INDEX = StackPriorityEntry::MATERIAL_INDEX_MAXIMUM;\n\n\t// Stack position is pointing at the last valid entry.\n\t// Entry 0 is always present and represent air basically\n\tint stack_position = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ONB.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_ONB_H\n#define DEVICE_ONB_H\n\n#include \"HostDeviceCommon/Math.h\"\n\n /*\n  * This uses the technique from \"Improved accuracy when building an orthonormal basis\" by Nelson Max, \n  * https://jcgt.org/published/0006/01/02.\n  * \n  * Taken from https://github.com/nvpro-samples/nvpro_core/blob/master/nvvkhl/shaders/func.h\n  * and optimised a little bit by @tigrazone\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void build_ONB(const float3& N, float3& T, float3& B)\n{\n    if (N.z < -0.99998796f)  // Handle the singularity\n    {\n        T = make_float3(0.0f, -1.0f, 0.0f);\n        B = make_float3(-1.0f, 0.0f, 0.0f);\n        return;\n    }\n\n    float nxa = -N.x / (1.0f + N.z);\n    T = make_float3(1.0f + N.x * nxa, nxa * N.y, -N.x);\n    B = make_float3(T.y, 1.0f - N.y * N.y / (1.0f + N.z), -N.y);\n}\n\n/*\n * Rotation of the basis around the normal by 'basis_rotation' radians\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void build_rotated_ONB(const float3& N, float3& T, float3& B, float basis_rotation)\n{\n    float3 up = hippt::abs(N.z) < 0.9999999f ? 
make_float3(0.0f, 0.0f, 1.0f) : make_float3(1.0f, 0.0f, 0.0f);\n    T = hippt::normalize(hippt::cross(up, N));\n\n    // Rodrigues' rotation\n    T = T * cos(basis_rotation) + hippt::cross(N, T) * sin(basis_rotation) + N * hippt::dot(N, T) * (1.0f - cos(basis_rotation));\n    B = hippt::cross(N, T);\n}\n\n/*\n * Transforms V from its local space to the space around the normal\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 local_to_world_frame(const float3& N, const float3& V)\n{\n    float3 T, B;\n    build_ONB(N, T, B);\n\n    return hippt::normalize(V.x * T + V.y * B + V.z * N);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 local_to_world_frame(const float3& T, const float3& B, const float3& N, const float3& V)\n{\n    return hippt::normalize(V.x * T + V.y * B + V.z * N);\n}\n\n/*\n * Transforms V from its space to the local space around the normal\n * The given normal is the Z axis of the local frame around the normal\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 world_to_local_frame(const float3& N, const float3& V)\n{\n    float3 T, B;\n    build_ONB(N, T, B);\n\n    return hippt::normalize(make_float3(hippt::dot(V, T), hippt::dot(V, B), hippt::dot(V, N)));\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 world_to_local_frame(const float3& T, const float3& B, const float3& N, const float3& V)\n{\n    return hippt::normalize(make_float3(hippt::dot(V, T), hippt::dot(V, B), hippt::dot(V, N)));\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/PathTracing.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_PATH_TRACING_H\n#define DEVICE_INCLUDES_PATH_TRACING_H\n\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/RussianRoulette.h\"\n#include \"Device/includes/WarpDirectionReuse.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_DEVICE bool path_tracing_find_indirect_bounce_intersection(HIPRTRenderData& render_data, hiprtRay ray, RayPayload& out_ray_payload, HitInfo& out_closest_hit_info, Xorshift32Generator& random_number_generator)\n{\n\treturn trace_main_path_ray(render_data, ray, out_ray_payload, out_closest_hit_info, out_closest_hit_info.primitive_index, out_ray_payload.bounce, random_number_generator);\n}\n\n/**\n * If sampleDirectionOnly is 'true', only the direction for the next bounce will be computed\n * but without evaluating the contribution of the BSDF or the PDF.\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_DEVICE void path_tracing_sample_next_indirect_bounce(HIPRTRenderData& render_data, RayPayload& ray_payload, HitInfo& closest_hit_info, float3 view_direction, ColorRGB32F& out_bsdf_color, float3& out_bounce_direction, float& out_bsdf_pdf, Xorshift32Generator& random_number_generator, BSDFIncidentLightInfo* out_sampled_light_info = nullptr)\n{\n    BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), *out_sampled_light_info, ray_payload.volume_state, true, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness);\n\n    out_bsdf_color = bsdf_dispatcher_sample<sampleDirectionOnly>(render_data, bsdf_context, out_bounce_direction, out_bsdf_pdf, random_number_generator);\n\n    
ray_payload.accumulate_roughness(*out_sampled_light_info);\n\n#if DoFirstBounceWarpDirectionReuse == KERNEL_OPTION_TRUE\n    warp_direction_reuse(render_data, closest_hit_info, ray_payload, -ray.direction, bounce_direction, bsdf_color, bsdf_pdf, bounce, random_number_generator);\n#endif\n}\n\n/**\n * Returns the new ray throughput after attenuation of the given 'current_throughput'\n */\nHIPRT_DEVICE ColorRGB32F path_tracing_update_ray_throughput(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo& closest_hit_info, ColorRGB32F current_throughput, float& rr_throughput_scaling, ColorRGB32F bsdf_color, float3 bounce_direction, float bsdf_pdf, Xorshift32Generator& random_number_generator, bool apply_russian_roulette = true)\n{\n    ColorRGB32F throughput_attenuation = bsdf_color * hippt::abs(hippt::dot(bounce_direction, closest_hit_info.shading_normal)) / bsdf_pdf;\n    // Russian roulette\n    if (apply_russian_roulette && !do_russian_roulette(render_data.render_settings, ray_payload.bounce, current_throughput, rr_throughput_scaling, throughput_attenuation, random_number_generator))\n        return ColorRGB32F(0.0f);\n\n    // Dispersion ray throughput filter\n    current_throughput *= get_dispersion_ray_color(ray_payload.volume_state.sampled_wavelength, ray_payload.material.dispersion_scale);\n    current_throughput *= throughput_attenuation;\n    // Clamp every component to a minimum of 1.0e-5f to avoid numerical instabilities that can\n    // happen: with some material, the throughput can get so low that it becomes denormalized and\n    // this can cause issues in some parts of the renderer (most notably the NaN detection)\n    current_throughput.max(ColorRGB32F(1.0e-5f, 1.0e-5f, 1.0e-5f));\n\n    ray_payload.next_ray_state = RayState::BOUNCE;\n\n    return current_throughput;\n}\n\n/**\n * Returns the new ray throughput after attenuation of the given 'current_throughput'\n */\nHIPRT_DEVICE ColorRGB32F 
path_tracing_update_ray_throughput(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo& closest_hit_info, ColorRGB32F current_throughput, ColorRGB32F bsdf_color, float3 bounce_direction, float bsdf_pdf, Xorshift32Generator& random_number_generator, bool apply_russian_roulette = true)\n{\n    float unused_rr_throughput_scaling;\n    return path_tracing_update_ray_throughput(render_data, ray_payload, closest_hit_info, current_throughput, unused_rr_throughput_scaling, bsdf_color, bounce_direction, bsdf_pdf, random_number_generator, apply_russian_roulette);\n}\n\n/**\n * Returns true if the bounce was sampled successfully,\n * false otherwise (is the BSDF sample failed, if russian roulette killed the sample, ...)\n * \n * If sampleDirectionOnly is 'true', only the direction for the next bounce will be computed\n * but without evaluating the contribution of the BSDF or the PDF.\n */\ntemplate <bool sampleDirectionOnly = false>\nHIPRT_DEVICE bool path_tracing_compute_next_indirect_bounce(HIPRTRenderData& render_data, RayPayload& ray_payload, HitInfo& closest_hit_info, float3 view_direction, hiprtRay& out_ray, Xorshift32Generator& random_number_generator, BSDFIncidentLightInfo* incident_light_info = nullptr)\n{\n    ColorRGB32F bsdf_color;\n    float3 bounce_direction;\n    float bsdf_pdf;\n    path_tracing_sample_next_indirect_bounce<sampleDirectionOnly>(render_data, ray_payload, closest_hit_info, view_direction, bsdf_color, bounce_direction, bsdf_pdf, random_number_generator, incident_light_info);\n\n    // Terminate ray if bad sampling\n    if (bsdf_pdf <= 0.0f && !sampleDirectionOnly)\n        return false;\n\n    ray_payload.throughput = path_tracing_update_ray_throughput(render_data, ray_payload, closest_hit_info, ray_payload.throughput, bsdf_color, bounce_direction, bsdf_pdf, random_number_generator);\n    if (ray_payload.throughput.is_black() && !sampleDirectionOnly)\n        // Killed by russian roulette\n        return false;\n\n    
out_ray.origin = closest_hit_info.inter_point;\n    out_ray.direction = bounce_direction;\n\n    return true;\n}\n\nHIPRT_DEVICE void store_denoiser_AOVs(HIPRTRenderData& render_data, uint32_t pixel_index, float3 shading_normal, ColorRGB32F base_color)\n{\n    if (render_data.render_settings.sample_number == 0)\n        render_data.aux_buffers.denoiser_albedo[pixel_index] = base_color;\n    else\n        render_data.aux_buffers.denoiser_albedo[pixel_index] = (render_data.aux_buffers.denoiser_albedo[pixel_index] * render_data.render_settings.denoiser_AOV_accumulation_counter + base_color) / (render_data.render_settings.denoiser_AOV_accumulation_counter + 1.0f);\n\n    if (render_data.render_settings.sample_number == 0)\n        render_data.aux_buffers.denoiser_normals[pixel_index] = shading_normal;\n    else\n    {\n        float3 accumulated_normal = (render_data.aux_buffers.denoiser_normals[pixel_index] * render_data.render_settings.denoiser_AOV_accumulation_counter + shading_normal) / (render_data.render_settings.denoiser_AOV_accumulation_counter + 1.0f);\n        float normal_length = hippt::length(accumulated_normal);\n        if (!hippt::is_zero(normal_length))\n            // Checking that it is non-zero otherwise we would accumulate a persistent NaN in the buffer when normalizing by the 0-length\n            render_data.aux_buffers.denoiser_normals[pixel_index] = accumulated_normal / normal_length;\n    }\n}\n\nHIPRT_DEVICE ColorRGB32F path_tracing_miss_gather_envmap(HIPRTRenderData& render_data, const ColorRGB32F& ray_throughput, float3 ray_direction, int bounce, uint32_t pixel_index)\n{\n    ColorRGB32F skysphere_color;\n\n    if (render_data.world_settings.ambient_light_type == AmbientLightType::UNIFORM || render_data.bsdfs_data.white_furnace_mode)\n        skysphere_color = render_data.world_settings.uniform_light_color;\n    else if (render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP && render_data.world_settings.envmap_intensity 
== 0.0f)\n        return ColorRGB32F(0.0f);\n    else if (render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP)\n    {\n#if EnvmapSamplingStrategy != ESS_NO_SAMPLING\n        // If we have sampling, only taking envmap into account on camera ray miss\n        if (bounce == 0)\n#endif\n        {\n            // We're only getting the skysphere radiance for the first rays because the\n            // syksphere is importance sampled.\n            skysphere_color = eval_envmap_no_pdf(render_data.world_settings, ray_direction);\n\n#if EnvmapSamplingStrategy == ESS_NO_SAMPLING\n            // If we don't have envmap sampling, we're only going to unscale on\n            // bounce 0 (which is when a ray misses directly --> background color).\n            // Otherwise, if not bounce 2, we do want to take the scaling into\n            // account so this if will fail and the envmap color will never be unscaled\n            if (!render_data.world_settings.envmap_scale_background_intensity && bounce == 0)\n#else\n            if (!render_data.world_settings.envmap_scale_background_intensity)\n#endif\n                // Un-scaling the envmap if the user doesn't want to scale the background\n                skysphere_color /= render_data.world_settings.envmap_intensity;\n        }\n    }\n\n    skysphere_color = clamp_light_contribution(skysphere_color, render_data.render_settings.envmap_contribution_clamp, /* clamp condition */ true);\n\n    ColorRGB32F indirect_lighting_contribution = skysphere_color * ray_throughput;\n    // Only clamping with the indirect lighting clamp value if\n    // this is bounce > 0 (thanks to /* clamp condition */ bounce > 0)\n    ColorRGB32F clamped_indirect_lighting_contribution = clamp_light_contribution(\n        indirect_lighting_contribution, render_data.render_settings.indirect_contribution_clamp,\n        /* clamp condition */ bounce > 0);\n\n    if (bounce == 0)\n        // The camera ray missed so we don't have the normals 
but we have the base color\n        store_denoiser_AOVs(render_data, pixel_index, make_float3(0, 0, 0), skysphere_color);\n\n    return clamped_indirect_lighting_contribution;\n}\n\nHIPRT_DEVICE ColorRGB32F path_tracing_miss_gather_envmap(HIPRTRenderData& render_data, RayPayload& ray_payload, float3 ray_direction, uint32_t pixel_index)\n{\n    return path_tracing_miss_gather_envmap(render_data, ray_payload.throughput, ray_direction, ray_payload.bounce, pixel_index);\n}\n\nHIPRT_DEVICE void path_tracing_accumulate_color(const HIPRTRenderData& render_data, const ColorRGB32F& ray_color, uint32_t pixel_index)\n{\n#if DisplayOnlySampleN == KERNEL_OPTION_TRUE\n     int sampleIndex = render_data.render_settings.output_debug_sample_N;\n\n     if (render_data.render_settings.sample_number == sampleIndex)\n     {\n         render_data.buffers.accumulated_ray_colors[pixel_index] = ray_color;\n#if ViewportColorOverriden == 0\n         render_data.buffers.accumulated_ray_colors[pixel_index] *= (sampleIndex + 1);\n#endif\n     }\n\n     return;\n#endif\n\n    if (render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n    {\n        float squared_luminance_of_samples = ray_color.luminance() * ray_color.luminance();\n        // We can only use these buffers if the adaptive sampling or the stop noise threshold is enabled.\n        // Otherwise, the buffers are destroyed to save some VRAM so they are not accessible\n        render_data.aux_buffers.pixel_squared_luminance[pixel_index] += squared_luminance_of_samples;\n    }\n\n    if (render_data.render_settings.sample_number == 0)\n        render_data.buffers.accumulated_ray_colors[pixel_index] = ray_color;\n    else\n        // If we are at a sample that is not 0, this means that we are accumulating\n        render_data.buffers.accumulated_ray_colors[pixel_index] += ray_color;\n\n    if (render_data.buffers.gmon_estimator.sets != nullptr)\n    {\n        // GMoN is in use, accumulating in the GMoN sets\n\n        
unsigned int offset = render_data.render_settings.render_resolution.x * render_data.render_settings.render_resolution.y * render_data.buffers.gmon_estimator.next_set_to_accumulate + pixel_index;\n\n        if (render_data.render_settings.sample_number == 0)\n            render_data.buffers.gmon_estimator.sets[offset] = ray_color;\n        else\n            render_data.buffers.gmon_estimator.sets[offset] += ray_color;\n    }\n}\n\nHIPRT_DEVICE void path_tracing_accumulate_debug_view_color(const HIPRTRenderData& render_data, RayPayload& ray_payload, int pixel_index, Xorshift32Generator& rng)\n{\n#if ViewportColorOverriden == 1\n    // Modifying the ray color such that we display some debug color to the screen\n\n\n#if DirectLightNEEPlusPlusDisplayShadowRaysDiscarded == KERNEL_OPTION_TRUE\n    // Nothing to do, the debug is already handled in the shadow ray NEE function\n#elif NEEPlusPlusDebugMode != NEE_PLUS_PLUS_DEBUG_MODE_NO_DEBUG\n    if (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n    {\n        // We have a first hit\n        float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n        float3 shading_normal = render_data.g_buffer.shading_normals[pixel_index].unpack();\n        float3 view_direction = render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n        \n        unsigned int trash_checksum;\n        NEEPlusPlusContext context;\n        context.envmap = false;\n        context.point_on_light = make_float3(0, 0, 0);\n        context.shaded_point = primary_hit;\n\n        ray_payload.ray_color = ColorRGB32F::random_color(render_data.nee_plus_plus.hash_context(context, render_data.current_camera, trash_checksum));\n        ray_payload.ray_color *= (render_data.render_settings.sample_number + 1);\n        ray_payload.ray_color *= hippt::dot(shading_normal, view_direction);\n    }\n#elif ReGIR_DebugMode != REGIR_DEBUG_MODE_NO_DEBUG\n#if ReGIR_DebugMode  == 
REGIR_DEBUG_MODE_GRID_CELLS\n    if (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n    {\n        // We have a first hit\n        float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n        float3 normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n        float3 view_direction = render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n        float primary_hit_roughness = render_data.g_buffer.materials[pixel_index].get_roughness();\n\n        ray_payload.ray_color = render_data.render_settings.regir_settings.get_random_cell_color(primary_hit, normal, render_data.current_camera, primary_hit_roughness, true);\n        ray_payload.ray_color *= (render_data.render_settings.sample_number + 1);\n        ray_payload.ray_color *= hippt::dot(normal, view_direction);\n    }\n#elif ReGIR_DebugMode == REGIR_DEBUG_MODE_AVERAGE_CELL_NON_CANONICAL_RESERVOIR_CONTRIBUTION\n    if (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n    {\n        float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n\n        unsigned int cell_index = render_data.render_settings.regir_settings.get_hash_grid_cell_index_from_world_pos(primary_hit);\n\n        float average_contribution = 0.0f;\n        for (int i = 0; i < render_data.render_settings.regir_settings.grid_fill.get_non_canonical_reservoir_count_per_cell(); i++)\n        {\n            ReGIRReservoir reservoir = render_data.render_settings.regir_settings.get_cell_non_canonical_reservoir_from_cell_reservoir_index(cell_index, i);\n            average_contribution += reservoir.sample.target_function * reservoir.UCW;\n        }\n\n        // Averaging\n        average_contribution /= render_data.render_settings.regir_settings.grid_fill.get_non_canonical_reservoir_count_per_cell();\n        // Scaling by the debug factor for visualization purposes\n        average_contribution *= 
render_data.render_settings.regir_settings.debug_view_scale_factor;\n        // Scaling by SPP\n        average_contribution *= (render_data.render_settings.sample_number + 1);\n\n        ray_payload.ray_color = ColorRGB32F(average_contribution);\n    }\n#elif ReGIR_DebugMode == REGIR_DEBUG_MODE_AVERAGE_CELL_CANONICAL_RESERVOIR_CONTRIBUTION\n    if (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n    {\n        float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n\n        unsigned int cell_index = render_data.render_settings.regir_settings.get_hash_grid_cell_index_from_world_pos(primary_hit);\n\n        float average_contribution = 0.0f;\n        for (int i = 0; i < render_data.render_settings.regir_settings.grid_fill.get_canonical_reservoir_count_per_cell(); i++)\n        {\n            ReGIRReservoir reservoir = render_data.render_settings.regir_settings.get_cell_canonical_reservoir_from_cell_reservoir_index(cell_index, i);\n            average_contribution += reservoir.sample.target_function * reservoir.UCW;\n        }\n\n        // Averaging\n        average_contribution /= render_data.render_settings.regir_settings.grid_fill.get_canonical_reservoir_count_per_cell();\n        // Scaling by the debug factor for visualization purposes\n        average_contribution *= render_data.render_settings.regir_settings.debug_view_scale_factor;\n        // Scaling by SPP\n        average_contribution *= (render_data.render_settings.sample_number + 1);\n\n        ray_payload.ray_color = ColorRGB32F(average_contribution);\n    }\n#elif ReGIR_DebugMode == REGIR_DEBUG_MODE_REPRESENTATIVE_POINTS\n    if (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n    {\n        float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n        float3 normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n        float primary_hit_roughness = 
render_data.g_buffer.materials[pixel_index].get_roughness();\n\n        unsigned int cell_index = render_data.render_settings.regir_settings.get_hash_grid_cell_index_from_world_pos(primary_hit, normal, render_data.current_camera, primary_hit_roughness, true);\n\n        ColorRGB32F color;\n        float3 rep_point = ReGIR_get_cell_world_point(render_data, cell_index, true);\n        // Interpreting debug_view_scale_factor as a distance\n        if (hippt::length(rep_point - primary_hit) < render_data.render_settings.regir_settings.debug_view_scale_factor)\n            color = ColorRGB32F::random_color(cell_index + 1);\n\n        // Scaling by SPP so that the visualization doesn't get darker and darker with increasing number of SPP\n        color *= render_data.render_settings.sample_number + 1;\n\n        ray_payload.ray_color = ColorRGB32F(color);\n    }\n#elif ReGIR_DebugMode == REGIR_DEBUG_MODE_REPRESENTATIVE_NORMALS\nif (render_data.g_buffer.first_hit_prim_index[pixel_index] != -1)\n{\n    float3 primary_hit = render_data.g_buffer.primary_hit_position[pixel_index];\n    float3 normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n    float primary_hit_roughness = render_data.g_buffer.materials[pixel_index].get_roughness();\n\n    unsigned int cell_index = render_data.render_settings.regir_settings.get_hash_grid_cell_index_from_world_pos(primary_hit, normal, render_data.current_camera, primary_hit_roughness, true);\n\n    ColorRGB32F color = (ColorRGB32F(ReGIR_get_cell_world_normal(render_data, cell_index, true)) + ColorRGB32F(1.0f)) * 0.5f;\n\n    // Scaling by SPP so that the visualization doesn't get darker and darker with increasing number of SPP\n    color *= render_data.render_settings.sample_number + 1;\n\n    ray_payload.ray_color = ColorRGB32F(color);\n}\n#endif\n#endif\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/RIS/RIS.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RIS_H\n#define DEVICE_RIS_H\n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/RIS/RIS_Reservoir.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F evaluate_reservoir_sample(HIPRTRenderData& render_data, RayPayload& ray_payload, \n    const HitInfo& closest_hit_info, const float3& view_direction,\n    const RISReservoir& reservoir, Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F final_color;\n\n    if (reservoir.UCW <= 0.0f)\n        // No valid sample means no light contribution\n        return ColorRGB32F(0.0f);\n\n    RISSample sample = reservoir.sample;\n\n    bool in_shadow;\n    float distance_to_light;\n    float3 evaluated_point = closest_hit_info.inter_point;\n    float3 shadow_ray_direction = sample.point_on_light_source - evaluated_point;\n    float3 shadow_ray_direction_normalized = shadow_ray_direction / (distance_to_light = hippt::length(shadow_ray_direction));\n\n    NEEPlusPlusContext nee_plus_plus_context;\n    if (sample.is_bsdf_sample)\n        // A BSDF sample that has been picked by RIS cannot be occluded otherwise\n        // it would have a weight of 0 and would never be picked by RIS\n        in_shadow = false;\n    else\n    {\n        hiprtRay shadow_ray;\n        shadow_ray.origin = evaluated_point;\n        shadow_ray.direction = shadow_ray_direction_normalized;\n\n        nee_plus_plus_context.point_on_light = sample.point_on_light_source;\n        nee_plus_plus_context.shaded_point = shadow_ray.origin;\n        in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, 
closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n    }\n\n    if (!in_shadow)\n    {\n        float bsdf_pdf;\n        float cosine_at_evaluated_point;\n        ColorRGB32F bsdf_color;\n\n        if (sample.is_bsdf_sample)\n        {\n            // If we picked a BSDF sample, we're using the already computed cosine term and color\n            // because it's annoying to recompute it (we have to know if the BSDF is a refraction\n            // sample or not)\n            bsdf_color = sample.bsdf_sample_contribution;\n            cosine_at_evaluated_point = sample.bsdf_sample_cosine_term;\n        }\n        else\n        {\n            BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n            BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray_direction_normalized, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n            bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n            cosine_at_evaluated_point = hippt::abs(hippt::dot(closest_hit_info.shading_normal, shadow_ray_direction_normalized));\n        }\n\n        final_color = bsdf_color * reservoir.UCW * sample.emission * cosine_at_evaluated_point;\n        if (!sample.is_bsdf_sample)\n            final_color /= nee_plus_plus_context.unoccluded_probability;\n    }\n\n    return final_color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE RISReservoir sample_bsdf_and_lights_RIS_reservoir(const HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo& closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    // If we're rendering at low resolution, only doing 1 candidate of each\n    // for better interactive 
framerates\n    int nb_light_candidates = render_data.render_settings.do_render_low_resolution() ? 1 : render_data.render_settings.ris_settings.number_of_light_candidates;\n    int nb_bsdf_candidates = render_data.render_settings.do_render_low_resolution() ? 1 : render_data.render_settings.ris_settings.number_of_bsdf_candidates;\n\n    if (!MaterialUtils::can_do_light_sampling(ray_payload.material))\n        nb_light_candidates = 0;\n\n    // Sampling candidates with weighted reservoir sampling\n    RISReservoir reservoir;\n    for (int i = 0; i < nb_light_candidates; i++)\n    {\n        float target_function = 0.0f;\n        float candidate_weight = 0.0f;\n        LightSampleInformation light_sample_info = sample_one_emissive_triangle(render_data,\n            closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, \n            closest_hit_info.primitive_index, ray_payload,\n            random_number_generator);\n\n        if (light_sample_info.area_measure_pdf > 0.0f)\n        {\n            // It can happen that the light PDF returned by the emissive triangle\n            // sampling function is 0 because of emissive triangles that are so\n            // small that we cannot compute their normal and their area (the cross\n            // product of their edges gives a quasi-null vector --> length of 0.0f --> area of 0)\n\n            float3 to_light_direction = light_sample_info.point_on_light - closest_hit_info.inter_point;\n            float distance_to_light = hippt::length(to_light_direction);\n            to_light_direction = to_light_direction / distance_to_light; // Normalization\n            float cosine_at_light_source = compute_cosine_term_at_light_source(light_sample_info.light_source_normal, -to_light_direction);\n            // Multiplying by the inside_surface_multiplier here because if we're inside the surface, we want to flip the normal\n            // for the dot product to be 
\"properly\" oriented.\n            float cosine_at_evaluated_point = hippt::abs(hippt::dot(closest_hit_info.shading_normal, to_light_direction));\n            if (cosine_at_evaluated_point > 0.0f && cosine_at_light_source > 1.0e-6f)\n            {\n                float bsdf_pdf = 0.0f;\n                // Early check for minimum light contribution: if the light itself doesn't contribute enough,\n                // adding the BSDF attenuation on top of it will only make it worse so we can already\n                // skip the light and saves ourselves the evaluation of the BSDF\n                bool contributes_enough = check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, light_sample_info.emission / light_sample_info.area_measure_pdf);\n                if (!contributes_enough)\n                    target_function = 0.0f;\n                else\n                {\n                    // Only going to evaluate the target function if we passed the preliminary minimum light contribution test\n\n                    BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n                    BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, to_light_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n                    ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n                    ColorRGB32F light_contribution = bsdf_color * light_sample_info.emission * cosine_at_evaluated_point;\n                    // Checking the light contribution and taking the BSDF and light PDFs into account\n                    contributes_enough = check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, light_contribution / 
bsdf_pdf / light_sample_info.area_measure_pdf);\n                    if (!contributes_enough)\n                        // The light doesn't contribute enough, setting the target function to 0.0f\n                        // so that this light sample is skipped\n                        // \n                        // Also, if at least one thread is going to evaluate the light anyways, because of the divergence that this would\n                        // create, we may as well evaluate the light for all threads and not loose that much performance anyways\n                        target_function = 0.0f;\n                    else\n                        target_function = light_contribution.luminance();\n                }\n\n#if RISUseVisiblityTargetFunction == KERNEL_OPTION_TRUE\n                if (!render_data.render_settings.do_render_low_resolution() && target_function > 0.0f)\n                {\n                    // Only doing visiblity if we're not rendering at low resolution\n                    // (meaning we're moving the camera) for better interaction framerates\n\n                    hiprtRay shadow_ray;\n                    shadow_ray.origin = closest_hit_info.inter_point;\n                    shadow_ray.direction = to_light_direction;\n\n                    bool visible = !evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, ray_payload.bounce, random_number_generator);\n\n                    target_function *= visible;\n                }\n#endif\n\n                // Converting the PDF from area measure to solid angle measure\n                float solid_angle_light_pdf = area_to_solid_angle_pdf(light_sample_info.area_measure_pdf, distance_to_light, cosine_at_light_source);\n\n                float mis_weight = balance_heuristic(solid_angle_light_pdf, nb_light_candidates, bsdf_pdf, nb_bsdf_candidates);\n                candidate_weight = mis_weight * target_function / solid_angle_light_pdf;\n         
   }\n        }\n\n        RISSample light_RIS_sample;\n        light_RIS_sample.is_bsdf_sample = false;\n        light_RIS_sample.point_on_light_source = light_sample_info.point_on_light;\n        light_RIS_sample.target_function = target_function;\n        light_RIS_sample.emission = light_sample_info.emission;\n\n        reservoir.add_one_candidate(light_RIS_sample, candidate_weight, random_number_generator);\n        reservoir.sanity_check();\n    }\n\n    // Whether or not a BSDF sample has been retained by the reservoir\n    for (int i = 0; i < nb_bsdf_candidates; i++)\n    {\n        float bsdf_sample_pdf = 0.0f;\n        float target_function = 0.0f;\n        float candidate_weight = 0.0f;\n        float3 sampled_bsdf_direction;\n\n        BSDFIncidentLightInfo incident_light_info;\n        BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n        ColorRGB32F bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, sampled_bsdf_direction, bsdf_sample_pdf, random_number_generator);\n\n        RISSample bsdf_RIS_sample;\n        if (bsdf_sample_pdf > 0.0f)\n        {\n            hiprtRay bsdf_ray;\n            bsdf_ray.origin = closest_hit_info.inter_point;\n            bsdf_ray.direction = sampled_bsdf_direction;\n\n            BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n            bool hit_found = evaluate_bsdf_light_sample_ray(render_data, bsdf_ray, 1.0e35f, shadow_light_ray_hit_info, closest_hit_info.primitive_index, ray_payload.bounce, random_number_generator);\n\n            if (hit_found && !shadow_light_ray_hit_info.hit_emission.is_black() && compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -sampled_bsdf_direction) 
> 0.0f)\n            {\n                // If we intersected an emissive material, compute the weight. \n                // Otherwise, the weight is 0 because of the emision being 0 so we just don't compute it\n\n                // Using abs here because we want the dot product to be positive.\n                // You may be thinking that if we're doing this, then we're not going to discard BSDF\n                // sampled direction that are below the surface (whereas we should discard them).\n                // That would be correct but bsdf_dispatcher_sample return a PDF == 0.0f if a bad\n                // direction was sampled and if the PDF is 0.0f, we never get to this line of code\n                // you're reading. If we are here, this is because we sampled a direction that is\n                // correct for the BSDF. Even if the direction is correct, the dot product may be\n                // negative in the case of refractions / total internal reflections and so in this case,\n                // we'll need to abs() the dot product for it to be positive\n                float cosine_at_evaluated_point = hippt::abs(hippt::dot(closest_hit_info.shading_normal, sampled_bsdf_direction));\n\n                // Our target function does not include the geometry term because we're integrating\n                // in solid angle. 
The geometry term in the target function ( / in the integrand) is only\n                // for surface area direct lighting integration\n                ColorRGB32F light_contribution = bsdf_color * shadow_light_ray_hit_info.hit_emission * cosine_at_evaluated_point;\n                target_function = light_contribution.luminance();\n\n                float light_pdf = pdf_of_emissive_triangle_hit_solid_angle(render_data, shadow_light_ray_hit_info, sampled_bsdf_direction);\n                bool contributes_enough = bsdf_sample_pdf <= 0.0f || check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, light_contribution / light_pdf / bsdf_sample_pdf);\n                if (!contributes_enough)\n                    target_function = 0.0f;\n\n                float mis_weight = balance_heuristic(bsdf_sample_pdf, nb_bsdf_candidates, light_pdf, nb_light_candidates);\n                candidate_weight = mis_weight * target_function / bsdf_sample_pdf;\n\n                bsdf_RIS_sample.emission = shadow_light_ray_hit_info.hit_emission;\n                bsdf_RIS_sample.point_on_light_source = bsdf_ray.origin + bsdf_ray.direction * shadow_light_ray_hit_info.hit_distance;\n                bsdf_RIS_sample.is_bsdf_sample = true;\n                bsdf_RIS_sample.bsdf_sample_contribution = bsdf_color;\n                bsdf_RIS_sample.bsdf_sample_cosine_term = cosine_at_evaluated_point;\n                bsdf_RIS_sample.target_function = target_function;\n            }\n        }\n\n        reservoir.add_one_candidate(bsdf_RIS_sample, candidate_weight, random_number_generator);\n        reservoir.sanity_check();\n    }\n\n    reservoir.end();\n    return reservoir;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_lights_RIS(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo& closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    if (render_data.buffers.emissive_triangles_count == 0)\n  
      return ColorRGB32F(0.0f);\n\n    RISReservoir reservoir = sample_bsdf_and_lights_RIS_reservoir(render_data, ray_payload, closest_hit_info, view_direction, random_number_generator);\n\n    return evaluate_reservoir_sample(render_data, ray_payload, \n        closest_hit_info, view_direction, \n        reservoir, random_number_generator);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/RIS/RIS_Reservoir.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RIS_RESERVOIR_H\n#define DEVICE_RIS_RESERVOIR_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifndef __KERNELCC__\n#include \"Utils/Utils.h\"\n\n // For multithreaded console error logging on the CPU if NaNs are detected\n#include <mutex>\nstatic std::mutex ris_log_mutex;\n#endif\n\nstruct RISSample\n{\n    ColorRGB32F emission;\n    float3 point_on_light_source = { 0, 0, 0 };\n\n    float target_function = 0.0f;\n\n    // TODO Can this be refactored? Is this needed?\n    bool is_bsdf_sample = false;\n    ColorRGB32F bsdf_sample_contribution;\n    float bsdf_sample_cosine_term = 0.0f;\n};\n\nstruct RISReservoir\n{\n    HIPRT_HOST_DEVICE void add_one_candidate(RISSample new_sample, float weight, Xorshift32Generator& random_number_generator)\n    {\n        M++;\n        weight_sum += weight;\n\n        if (random_number_generator() < weight / weight_sum)\n            sample = new_sample;\n    }\n\n    HIPRT_HOST_DEVICE void end()\n    {\n        if (weight_sum == 0.0f)\n            UCW = 0.0f;\n        else\n            UCW = 1.0f / sample.target_function * weight_sum;\n    }\n\n    HIPRT_HOST_DEVICE HIPRT_INLINE void sanity_check(int2 pixel_coords = make_int2(-1, -1))\n    {\n#ifndef __KERNELCC__\n        if (M < 0)\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"Negative reservoir M value at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << M << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(weight_sum) || std::isinf(weight_sum))\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"NaN or inf reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            
Utils::debugbreak();\n        }\n        else if (weight_sum < 0)\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"Negative reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::abs(weight_sum) < std::numeric_limits<float>::min() && weight_sum != 0.0f)\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"Denormalized weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(UCW) || std::isinf(UCW))\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"NaN or inf reservoir UCW at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (UCW < 0)\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"Negative reservoir UCW at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << UCW << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(sample.target_function) || std::isinf(sample.target_function))\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"NaN or inf reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (sample.target_function < 0)\n        {\n            std::lock_guard<std::mutex> lock(ris_log_mutex);\n            std::cerr << \"Negative reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << sample.target_function << std::endl;\n            Utils::debugbreak();\n  
      }\n#else\n        (void)pixel_coords;\n#endif\n    }\n\n    unsigned int M = 0;\n    // TODO weight sum is never used at the same time as UCW so only one variable can be used for both to save space\n    float weight_sum = 0.0f;\n    float UCW = 0.0f;\n\n    RISSample sample;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/RayPayload.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RAY_PAYLOAD_H\n#define DEVICE_RAY_PAYLOAD_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n#include \"Device/includes/RayVolumeState.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\nenum RayState\n{\n\tBOUNCE,\n\tMISSED\n};\n\nstruct RayPayload\n{\n\t// Energy left in the ray after it bounces around the scene\n\tColorRGB32F throughput = ColorRGB32F(1.0f);\n\t// Final color of the ray\n\tColorRGB32F ray_color = ColorRGB32F(0.0f);\n\t// Camera ray is \"Bounce\" to give it a chance to hit the scene\n\tRayState next_ray_state = RayState::BOUNCE;\n\n\t// What bounce we're currently at\n\tint bounce = 0;\n\t// Roughness accumulated by the bounces of the ray along the path. In [0, 1]\n\t//\n\t// If a ray bounced on a Lambertian surface along its path for example, the\n\t// accumulated roughness is going to be 1.0f.\n\t//\n\t// If the camera ray bounced on a mirror, the accumulated roughness is going to be 0.0f at bounce == 1.\n\t//\n\t// If the ray bounced on a specular  diffuse surface, the accumulated roughness is going to be that\n\t// of which lobe was sampled between the specular or diffuse\n\t//\n\t// The accumulated roughness is computed as the maximum between the current accumulated roughness\n\t// and the roughness of the lobe that was sampled to get the next bounce direction\n\tfloat accumulated_roughness = 0.0f;\n\t\n\t// Material of the current hit\n\tDeviceUnpackedEffectiveMaterial material;\n\n\tRayVolumeState volume_state;\n\t\n\tHIPRT_HOST_DEVICE void accumulate_roughness(BSDFIncidentLightInfo sampled_lobe)\n\t{\n\t\tswitch (sampled_lobe)\n\t\t{\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_LOBE:\n\t\tcase 
LIGHT_DIRECTION_SAMPLED_FROM_DIFFUSE_TRANSMISSION_LOBE:\n\t\t\taccumulated_roughness = 1.0f;\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE:\n\t\t\taccumulated_roughness = hippt::max(material.coat_roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_FIRST_METAL_LOBE:\n\t\t\taccumulated_roughness = hippt::max(material.roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_SECOND_METAL_LOBE:\n\t\t\taccumulated_roughness = hippt::max(material.second_roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_SPECULAR_LOBE:\n\t\t\t// The specular roughness is just material.roughness\n\t\t\taccumulated_roughness = hippt::max(material.roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFLECT_LOBE:\n\t\t\t// The glass roughness is just material.roughness\n\t\t\taccumulated_roughness = hippt::max(material.roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tcase LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE:\n\t\t\t// The glass roughness is just material.roughness\n\t\t\taccumulated_roughness = hippt::max(material.roughness, accumulated_roughness);\n\t\t\tbreak;\n\n\t\tdefault:\n\t\t\tbreak;\n\t\t}\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/RayVolumeState.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RAY_VOLUME_STATE_H\n#define DEVICE_RAY_VOLUME_STATE_H\n\n#include \"Device/includes/NestedDielectrics.h\"\n// Including dispersion for sampling a wavelength in the reconstruction of the first hit of RayVolumeState\n#include \"Device/includes/Dispersion.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\nstruct RayVolumeState\n{\n\t/**\n\t * On the GPU, it is necessary that the RayVolumeState is initialized manually as opposed to in a default constructor for example.\n\t * That's because the nested dielectrics stack is in shared memory and is thus a \"global variable\". \n\t * \n\t * If it were to be initialized in the RayVolumeState constructor, every declaration of a RayVolumeState variable\n\t * would call the constructor and reinitialize the whole nested dielectrics stack.\n\t */\n\tHIPRT_HOST_DEVICE RayVolumeState()\n\t{\n\t\tfor (int i = 0; i < NestedDielectricsStackSize; i++)\n\t\t{\n\t\t\tinterior_stack.stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].set_priority(0);\n\t\t\tinterior_stack.stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].set_odd_parity(true);\n\t\t\tinterior_stack.stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].set_topmost(true);\n\t\t\t// Setting the material index to the maximum\n\t\t\tinterior_stack.stack_entries[NESTED_DIELECTRICS_STACK_INDEX_SHIFT(i)].set_material_index(NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX);\n\t\t}\n\t}\n\n\tHIPRT_HOST_DEVICE void reconstruct_first_hit(const DeviceUnpackedEffectiveMaterial& material, int* material_indices_buffer, int primitive_index, Xorshift32Generator& random_number_generator)\n\t{\n\t\tif (primitive_index == -1)\n\t\t\t// No primary hit i.e. 
straight into the envmap\n\t\t\treturn;\n\n\t\tint mat_index = material_indices_buffer[primitive_index];\n\n\t\tinterior_stack.push(\n\t\t\tincident_mat_index,\n\t\t\toutgoing_mat_index,\n\t\t\tinside_material,\n\t\t\tmat_index,\n\t\t\tmaterial.get_dielectric_priority());\n\n\t\tif (material.dispersion_scale > 0.0f && material.specular_transmission > 0.0f && sampled_wavelength == 0.0f)\n\t\t\t// If we hit a dispersive material, we sample the wavelength that will be used\n\t\t\t// for computing the wavelength dependent IORs used for dispersion\n\t\t\t//\n\t\t\t// We're also not re-doing the sampling if a wavelength has already been sampled for that path\n\t\t\t//\n\t\t\t// Negating the wavelength to indicate that the throughput filter of the wavelength\n\t\t\t// hasn't been applied yet (applied in principled_glass_eval())\n\t\t\tsampled_wavelength = -sample_wavelength_uniformly(random_number_generator);\n\t}\n\n\t// How far has the ray traveled in the current volume.\n\tfloat distance_in_volume = 0.0f;\n\t// The stack of materials being traversed. Used for nested dielectrics handling\n\tNestedDielectricsInteriorStack interior_stack;\n\t// Indices of the material we were in before hitting the current dielectric surface\n\tint incident_mat_index = -1, outgoing_mat_index = -1;\n\t// Whether or not we're exiting a material\n\tbool inside_material = false;\n\n\t// For spectral dispersion. A random wavelength is sampled and replaces this value\n\t// when a glass object is hit. This wavelength can then be used to determine the IOR\n\t// that should be used for refractions/reflections on the dielectric object. \n\t// \n\t// The wavelength is also used to apply a throughput filter on the ray such that only the\n\t// sampled wavelength's color travels around the scene.\n\t//\n\t// If this value is negative, this is because the ray throughput filter hasn't been applied\n\t// yet. 
If the value is positive, the filter has been applied\n\tfloat sampled_wavelength = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/DI/FinalShading.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_FINAL_SHADING_H\n#define DEVICE_RESTIR_DI_FINAL_SHADING_H\n\n#include \"Device/includes/LightSampling/Envmap.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n // TODO make some simplification assuming that ReSTIR DI is never inside a surface (the camera being inside a surface may be an annoying case to handle)\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F evaluate_ReSTIR_DI_reservoir(const HIPRTRenderData& render_data, RayPayload& ray_payload, \n    const HitInfo& closest_hit_info, const float3& view_direction,\n    const ReSTIRDIReservoir& reservoir, Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F final_color;\n\n    if (reservoir.UCW <= 0.0f)\n        // No valid sample means no light contribution\n        return ColorRGB32F(0.0f);\n\n    ReSTIRDISample sample = reservoir.sample;\n\n    float distance_to_light;\n\n    float3 shadow_ray_direction;\n    if (sample.is_envmap_sample())\n    {\n        shadow_ray_direction = matrix_X_vec(render_data.world_settings.envmap_to_world_matrix, sample.point_on_light_source);\n        distance_to_light = 1.0e35f;\n    }\n    else\n    {\n        shadow_ray_direction = sample.point_on_light_source - closest_hit_info.inter_point;\n        shadow_ray_direction = shadow_ray_direction / (distance_to_light = hippt::length(shadow_ray_direction));\n    }\n\n    bool in_shadow = false;\n    if (sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED)\n        in_shadow = false;\n    else if (render_data.render_settings.restir_di_settings.do_final_shading_visibility)\n    {\n        hiprtRay shadow_ray;\n        shadow_ray.origin = closest_hit_info.inter_point;\n        shadow_ray.direction = shadow_ray_direction;\n\n        in_shadow = 
evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, /* bounce. Always 0 for ReSTIR */0, random_number_generator);\n    }\n\n    if (!in_shadow)\n    {\n        float bsdf_pdf;\n        float cosine_at_evaluated_point;\n\n        BSDFIncidentLightInfo incident_light_info = sample.flags_to_BSDF_incident_light_info();\n        BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, /* bounce. Always 0 for ReSTIR DI */ 0, 0.0f, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n        ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n        cosine_at_evaluated_point = hippt::dot(closest_hit_info.shading_normal, shadow_ray_direction);\n        if (sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_SAMPLED_FROM_GLASS_REFRACT_LOBE)\n            // We're not allowing samples that are below the surface\n            // UNLESS it's a BSDF refraction sample in which case it's valid\n            // so we're restoring the cosine term to be > 0.0f so that it passes\n            // the if() condition below\n            cosine_at_evaluated_point = hippt::abs(cosine_at_evaluated_point);\n\n        if (cosine_at_evaluated_point > 0.0f)\n        {\n            ColorRGB32F sample_emission;\n\n            if (sample.is_envmap_sample())\n            {\n                float envmap_pdf;\n                sample_emission = envmap_eval(render_data, shadow_ray_direction, envmap_pdf);\n            }\n            else\n            {\n                int material_index = render_data.buffers.material_indices[sample.emissive_triangle_index];\n                sample_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n            }\n\n            float 
area_measure_to_solid_angle_conversion;\n            if (sample.is_envmap_sample())\n                area_measure_to_solid_angle_conversion = 1.0f;\n            else\n            {\n                float3 emissive_triangle_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, sample.emissive_triangle_index));\n                area_measure_to_solid_angle_conversion = compute_cosine_term_at_light_source(emissive_triangle_normal, -shadow_ray_direction);\n                area_measure_to_solid_angle_conversion /= hippt::square(distance_to_light);\n            }\n\n            final_color = bsdf_color * reservoir.UCW * sample_emission * cosine_at_evaluated_point * area_measure_to_solid_angle_conversion;\n        }\n    }\n\n    return final_color;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void validate_reservoir(const HIPRTRenderData& render_data, ReSTIRDIReservoir& reservoir)\n{\n    if (reservoir.sample.is_envmap_sample() && render_data.world_settings.ambient_light_type != AmbientLightType::ENVMAP)\n        // Killing the reservoir if it was an envmap sample but the envmap is not used anymore\n        reservoir.UCW = 0.0f;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_light_ReSTIR_DI(const HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator, int2 pixel_coords)\n{\n\tint pixel_index = pixel_coords.x + pixel_coords.y * render_data.render_settings.render_resolution.x;\n    \n\t// Because the spatial reuse pass runs last, the output buffer of the spatial\n\t// pass contains the reservoir whose sample we're going to shade\n\tReSTIRDIReservoir& reservoir = render_data.render_settings.restir_di_settings.restir_output_reservoirs[pixel_index];\n\n    // Validates the reservoir i.e. kills the reservoir if it isn't valid\n    // anymore i.e. 
if it refers to a light that doesn't exist anymore\n    validate_reservoir(render_data, reservoir);\n\n\treturn evaluate_ReSTIR_DI_reservoir(render_data, ray_payload, \n        closest_hit_info, view_direction, \n        reservoir, random_number_generator);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/DI/PresampledLight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RESTIR_DI_PRESAMPLED_LIGHT_H\n#define RESTIR_DI_PRESAMPLED_LIGHT_H\n\n#include \"Device/includes/ReSTIR/DI/SampleFlags.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Math.h\"\n\nstruct ReSTIRDIPresampledLight\n{\n\t// Global primitive index corresponding to the emissive triangle sampled\n\tint emissive_triangle_index = -1;\n\n\t// For envmap samples, this 'point_on_light_source' is the envmap direction in *envmap space*\n\t// A sample is an envmap sample if 'flags' contains 'RESTIR_DI_FLAGS_ENVMAP_SAMPLE'\n\tfloat3 point_on_light_source = { 0, 0, 0 };\n\t// Only defined if the sample isn't an envmap sample\n\tfloat3 light_source_normal = { 0, 0, 0 };\n\n\tColorRGB32F radiance;\n\tfloat pdf = 0.0f;\n\n\t// Some flags about the sample\n\tunsigned char flags = RESTIR_DI_FLAGS_NONE;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/DI/Reservoir.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_RESERVOIR_H\n#define DEVICE_RESTIR_DI_RESERVOIR_H\n\n#include \"Device/includes/ReSTIR/DI/SampleFlags.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifndef __KERNELCC__\n#include \"Utils/Utils.h\"\n\n// For multithreaded console error logging on the CPU if NaNs are detected\n#include <mutex>\nstatic std::mutex restir_di_log_mutex;\n#endif\n\nstruct ReSTIRDISample\n{\n    // For envmap samples, this 'point_on_light_source' is the envmap direction in *envmap space*\n    // A sample is an envmap sample if 'flags' contains 'RESTIR_DI_FLAGS_ENVMAP_SAMPLE'\n    float3 point_on_light_source = { 0, 0, 0 };\n\n    // Global primitive index corresponding to the emissive triangle sampled\n    int emissive_triangle_index = -1;\n\n    float target_function = 0.0f;\n\n    // Some flags about the sample\n    unsigned char flags = RESTIR_DI_FLAGS_NONE;\n\n    HIPRT_HOST_DEVICE bool is_envmap_sample() const\n    {\n        return flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_ENVMAP_SAMPLE;\n    }\n\n    HIPRT_HOST_DEVICE static int flags_from_BSDF_incident_light_info(BSDFIncidentLightInfo sampled_lobe_info)\n    {\n        return static_cast<int>(sampled_lobe_info);\n    }\n\n    HIPRT_HOST_DEVICE BSDFIncidentLightInfo flags_to_BSDF_incident_light_info() const\n    {\n        return static_cast<BSDFIncidentLightInfo>(flags & (0b111111 << (BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE - 1)));\n    }\n};\n\nstruct ReSTIRDIReservoir\n{\n    HIPRT_HOST_DEVICE void add_one_candidate(ReSTIRDISample new_sample, float weight, Xorshift32Generator& random_number_generator)\n    {\n        M++;\n        weight_sum += weight;\n\n        if (random_number_generator() < weight / weight_sum)\n            sample = new_sample;\n    }\n\n    /**\n     * Combines 
'other_reservoir' into this reservoir\n     * \n     * 'target_function' is the target function evaluated at the pixel that is doing the\n     *      resampling with the sample from the reservoir that we're combining (which is 'other_reservoir')\n     * \n     * 'jacobian_determinant' is the determinant of the jacobian. In ReSTIR DI, it is used\n     *      for converting the solid angle PDF (or UCW since the UCW is an estimate of the PDF)\n     *      with respect to the shading point of the reservoir we're resampling to the solid\n     *      angle PDF with respect to the shading point of 'this' reservoir\n     * \n     * 'random_number_generator' for generating the random number that will be used to stochastically\n     *      select the sample from 'other_reservoir' or not\n     */\n    HIPRT_HOST_DEVICE bool combine_with(ReSTIRDIReservoir other_reservoir, float mis_weight, float target_function, float jacobian_determinant, Xorshift32Generator& random_number_generator)\n    {\n        if (other_reservoir.UCW <= 0.0f)\n        {\n            // Not going to be resampled anyways because of invalid UCW so quit exit\n            M += other_reservoir.M;\n\n            return false;\n        }\n\n        float reservoir_sample_weight = mis_weight * target_function * other_reservoir.UCW * jacobian_determinant;\n\n        M += other_reservoir.M;\n        weight_sum += reservoir_sample_weight;\n\n        if (random_number_generator() < reservoir_sample_weight / weight_sum)\n        {\n            sample = other_reservoir.sample;\n            sample.target_function = target_function;\n\n            return true;\n        }\n\n        return false;\n    }\n\n    HIPRT_HOST_DEVICE void end()\n    {\n        if (weight_sum == 0.0f)\n            UCW = 0.0f;\n        else\n            UCW = 1.0f / sample.target_function * weight_sum;\n    }\n\n    HIPRT_HOST_DEVICE void end_with_normalization(float normalization_numerator, float normalization_denominator)\n    {\n        // 
Checking some limit values\n        if (weight_sum == 0.0f || weight_sum < 1.0e-10f || weight_sum > 1.0e10f || normalization_denominator == 0.0f || normalization_numerator == 0.0f)\n            UCW = 0.0f;\n        else\n            UCW = 1.0f / sample.target_function * weight_sum * normalization_numerator / normalization_denominator;\n\n        // Hard limiting M to avoid explosions if the user decides not to use any M-cap (M-cap == 0)\n        M = hippt::min(M, 1000000);\n    }\n\n    HIPRT_HOST_DEVICE HIPRT_INLINE void sanity_check(int2 pixel_coords)\n    {\n#ifndef __KERNELCC__\n        if (M < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"Negative reservoir M value at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << M << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(weight_sum) || std::isinf(weight_sum))\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"NaN or inf reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (weight_sum < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"Negative reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::abs(weight_sum) < std::numeric_limits<float>::min() && weight_sum != 0.0f)\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"Denormalized weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(UCW) || std::isinf(UCW))\n        {\n            
std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"NaN or inf reservoir UCW at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (UCW < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"Negative reservoir UCW at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << UCW << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(sample.target_function) || std::isinf(sample.target_function))\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"NaN or inf reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (sample.target_function < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_di_log_mutex);\n            std::cerr << \"Negative reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << sample.target_function << std::endl;\n            Utils::debugbreak();\n        }\n#else\n        (void)pixel_coords;\n#endif\n    }\n\n    int M = 0;\n    // TODO weight sum is never used at the same time as UCW so only one variable can be used for both to save space\n\n    float weight_sum = 0.0f;\n    // If the UCW is set to -1, this is because the reservoir was killed by visibility reuse\n    float UCW = 0.0f;\n\n    ReSTIRDISample sample;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/DI/SampleFlags.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SAMPLE_FLAGS_H\n#define DEVICE_RESTIR_DI_SAMPLE_FLAGS_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n\nenum ReSTIRDISampleFlags\n{\n    RESTIR_DI_FLAGS_NONE = 0,\n    // The sample is an evmap sample and 'point_on_light_source'\n    // should be interpreted as a direction, not a point on a light source\n    RESTIR_DI_FLAGS_ENVMAP_SAMPLE = 1 << 0,\n    // The sample is a BSDF sample and we're indicating which lobe it comes from\n    // so that when evaluating the reservoir in FinalShading, we know what lobe\n    // the sample comes from and we can properly evaluate the BSDF\n    //\n    // We're reusing the values from the BSDFIncidentLightInfo enum here to be able\n    // to convert easily from the ReSTIRDI flags back to BSDFIncidentLightInfo (i.e.\n    // retrieve which lobe we sampled from the ReSTIRDISampleFlags)\n    RESTIR_DI_FLAGS_SAMPLED_FROM_COAT_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE,\n    RESTIR_DI_FLAGS_SAMPLED_FROM_FIRST_METAL_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_FIRST_METAL_LOBE,\n    RESTIR_DI_FLAGS_SAMPLED_FROM_SECOND_METAL_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SECOND_METAL_LOBE,\n    RESTIR_DI_FLAGS_SAMPLED_FROM_SPECULAR_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SPECULAR_LOBE,\n    RESTIR_DI_FLAGS_SAMPLED_FROM_GLASS_REFLECT_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFLECT_LOBE,\n    RESTIR_DI_FLAGS_SAMPLED_FROM_GLASS_REFRACT_LOBE = BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE,\n    // This sample *AT ITS OWN PIXEL* is unoccluded. This can be used to avoid tracing\n    // rays for visibility since we know it's unoccluded already\n    RESTIR_DI_FLAGS_UNOCCLUDED = 1 << 7\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/DI/TargetFunction.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_TARGET_FUNCTION_H\n#define DEVICE_RESTIR_DI_TARGET_FUNCTION_H\n\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 ReSTIR_DI_get_light_sample_direction(const HIPRTRenderData& render_data, const ReSTIRDISample& sample, \n\tfloat3 surface_shading_point, float& out_distance_to_light)\n{\n\tfloat3 sample_direction;\n\tif (sample.is_envmap_sample())\n\t{\n\t\tsample_direction = matrix_X_vec(render_data.world_settings.envmap_to_world_matrix, sample.point_on_light_source);\n\t\tout_distance_to_light = 1.0e35f;\n\t}\n\telse\n\t{\n\t\tsample_direction = sample.point_on_light_source - surface_shading_point;\n\t\tsample_direction = sample_direction / (out_distance_to_light = hippt::length(sample_direction));\n\t}\n\n\treturn sample_direction;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F ReSTIR_DI_get_light_sample_emission(const HIPRTRenderData& render_data, const ReSTIRDISample& sample, float3 sample_direction)\n{\n\tColorRGB32F sample_emission;\n\tif (sample.is_envmap_sample())\n\t{\n\t\tfloat envmap_pdf;\n\t\tsample_emission = envmap_eval(render_data, sample_direction, envmap_pdf);\n\t}\n\telse\n\t{\n\t\tint material_index = render_data.buffers.material_indices[sample.emissive_triangle_index];\n\t\tsample_emission = render_data.buffers.materials_buffer.get_emission(material_index);\n\t}\n\n\treturn sample_emission;\n}\n\ntemplate <bool withVisibility>\nHIPRT_HOST_DEVICE HIPRT_INLINE float ReSTIR_DI_evaluate_target_function(const HIPRTRenderData& render_data, const ReSTIRDISample& sample, ReSTIRSurface& surface, Xorshift32Generator& random_number_generator)\n{\n\tif (sample.emissive_triangle_index == -1 && !sample.is_envmap_sample())\n\t\t// No sample\n\t\treturn 0.0f;\n\n\tfloat bsdf_pdf;\n\tfloat 
distance_to_light;\n\tfloat3 sample_direction = ReSTIR_DI_get_light_sample_direction(render_data, sample, surface.shading_point, distance_to_light);\n\n\tfloat cosine_term = hippt::dot(surface.shading_normal, sample_direction);\n\tif (cosine_term <= 0.0f)\n\t\t// If the cosine term is 0.0f, the rest is going to be multiplied by that zero-cosine-term\n\t\t// and everything is going to be 0.0f anyway so we can return already\n\t\treturn 0.0f;\n\n\tBSDFIncidentLightInfo incident_light_info = sample.flags_to_BSDF_incident_light_info();\n\tBSDFContext bsdf_context(surface.view_direction, surface.shading_normal, surface.geometric_normal, sample_direction, incident_light_info, surface.ray_volume_state, false, surface.material, /* bounce. Always 0 for ReSTIR DI */ 0, 0.0f);\n\tColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\tColorRGB32F sample_emission = ReSTIR_DI_get_light_sample_emission(render_data, sample, sample_direction);\n\n\tfloat geometry_term = 1.0f;\n\tif (!sample.is_envmap_sample())\n\t{\n\t\tfloat3 emissive_triangle_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, sample.emissive_triangle_index));\n\t\tgeometry_term = compute_cosine_term_at_light_source(emissive_triangle_normal, -sample_direction);\n\t\tgeometry_term /= hippt::square(distance_to_light);\n\t}\n\n\tfloat target_function = (bsdf_color * sample_emission * cosine_term * geometry_term).luminance();\n\tif (target_function == 0.0f)\n\t\t// Quick exit because computing the visiblity that follows isn't going\n\t\t// to change anything to the fact that we have 0.0f target function here\n\t\treturn 0.0f;\n\n\tif constexpr (withVisibility)\n\t{\n\t\thiprtRay shadow_ray;\n\t\tshadow_ray.origin = surface.shading_point;\n\t\tshadow_ray.direction = sample_direction;\n\n\t\tbool visible = !evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, surface.primitive_index, /* bounce. 
Always 0 for ReSTIR DI*/ 0, random_number_generator);\n\n\t\ttarget_function *= visible;\n\t}\n\n\treturn target_function;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/GI/InitialCandidatesUtils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_RESTIR_GI_INITIAL_CANDIDATES_UTILS_H\n#define DEVICE_INCLUDES_RESTIR_GI_INITIAL_CANDIDATES_UTILS_H\n \n#include \"Device/includes/PathTracing.h\"\n\nHIPRT_HOST_DEVICE bool restir_gi_update_ray_throughputs(HIPRTRenderData& render_data, RayPayload& ray_payload,\n    ColorRGB32F& ray_throughput_to_visible_point, HitInfo& closest_hit_info,\n    ColorRGB32F bsdf_color, const float3& bounce_direction, float bsdf_pdf, \n    Xorshift32Generator& random_number_generator)\n{\n    ColorRGB32F throughput_attenuation = bsdf_color * hippt::abs(hippt::dot(bounce_direction, closest_hit_info.shading_normal)) / bsdf_pdf;\n    ColorRGB32F dispersion_throughput = get_dispersion_ray_color(ray_payload.volume_state.sampled_wavelength, ray_payload.material.dispersion_scale);\n\n    if (ray_payload.bounce > 0)\n    {\n        // With ReSTIR GI, we want the outgoing radiance from the second hit to the camera hit\n        // This means that we're basically not taking the first hit into account and so we're not\n        // updating the throughput (or the ray_color either, see the main loop) on the bounce 0\n\n        float rr_throughput_scaling = 1.0f;\n        // Doing the russian roulette\n        if (!do_russian_roulette(render_data.render_settings, ray_payload.bounce, ray_payload.throughput, rr_throughput_scaling, throughput_attenuation, random_number_generator))\n        {\n            // Killed by russian roulette\n            ray_throughput_to_visible_point = ColorRGB32F(0.0f);\n\n            return false;\n        }\n        else\n        {\n            // Not killed by russian roulette so we're scaling the throughputs\n            ray_throughput_to_visible_point *= rr_throughput_scaling;\n        }\n\n        // Dispersion ray throughput filter\n        ray_throughput_to_visible_point *= dispersion_throughput;\n      
  ray_throughput_to_visible_point *= throughput_attenuation;\n        // Clamp every component to a minimum of 1.0e-5f to avoid numerical instabilities that can\n        // happen: with some material, the throughput can get so low that it becomes denormalized and\n        // this can cause issues in some parts of the renderer (most notably the NaN detection)\n        ray_throughput_to_visible_point.max(ColorRGB32F(1.0e-5f, 1.0e-5f, 1.0e-5f));\n    }\n    else\n    {\n        if (ray_payload.bounce >= render_data.render_settings.russian_roulette_min_depth && render_data.render_settings.do_russian_roulette)\n            // Advancing the random number generation just to match non-ReSTIR GI path tracing in terms of randomness\n            random_number_generator();\n    }\n\n    ray_payload.throughput *= dispersion_throughput;\n    ray_payload.throughput *= throughput_attenuation;\n    // Clamp every component to a minimum of 1.0e-5f to avoid numerical instabilities that can\n    // happen: with some material, the throughput can get so low that it becomes denormalized and\n    // this can cause issues in some parts of the renderer (most notably the NaN detection)\n    ray_payload.throughput.max(ColorRGB32F(1.0e-5f, 1.0e-5f, 1.0e-5f));\n\n    return true;\n}\n\n /**\n * Returns true if the bounce was sampled successfully,\n * false otherwise (is the BSDF sample failed, if russian roulette killed the sample, ...)\n */\nHIPRT_HOST_DEVICE bool restir_gi_compute_next_indirect_bounce(HIPRTRenderData& render_data, RayPayload& ray_payload, \n    ColorRGB32F& ray_throughput_to_visible_point, HitInfo& closest_hit_info,\n    float3 view_direction, hiprtRay& out_ray, Xorshift32Generator& random_number_generator, BSDFIncidentLightInfo* incident_light_info = nullptr, float* out_bsdf_pdf = nullptr)\n{\n    ColorRGB32F bsdf_color;\n    float3 bounce_direction;\n    float bsdf_pdf;\n    path_tracing_sample_next_indirect_bounce(render_data, ray_payload, closest_hit_info, view_direction, 
bsdf_color, bounce_direction, bsdf_pdf, random_number_generator, incident_light_info);\n\n    if (out_bsdf_pdf != nullptr)\n        *out_bsdf_pdf = bsdf_pdf;\n\n    // Terminate ray if bad sampling\n    if (bsdf_pdf <= 0.0f)\n        return false;\n\n    if (!restir_gi_update_ray_throughputs(render_data, ray_payload, ray_throughput_to_visible_point, closest_hit_info, bsdf_color, bounce_direction, bsdf_pdf, random_number_generator))\n        return false;\n\n    out_ray.origin = closest_hit_info.inter_point;\n    out_ray.direction = bounce_direction;\n\n    return true;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/GI/Reservoir.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_GI_RESERVOIR_H\n#define DEVICE_RESTIR_GI_RESERVOIR_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n#include \"Device/includes/RayVolumeState.h\"\n\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifndef __KERNELCC__\n#include \"Utils/Utils.h\"\n\n// For multithreaded console error logging on the CPU if NaNs are detected\n#include <mutex>\nstatic std::mutex restir_gi_log_mutex;\n#endif\n\nstruct ReSTIRGISample\n{\n    float3 sample_point = make_float3(-1.0f, -1.0f, -1.0f);\n\n    int sample_point_primitive_index = -1;\n\n    RGBE9995Packed incoming_radiance_to_visible_point;\n\n    BSDFIncidentLightInfo incident_light_info_at_visible_point = BSDFIncidentLightInfo::NO_INFO;\n\n    // TODO is this one needed? I guess we're going to get a bunch of wrong shading where a sample was resampled and at shading time it hits an alpha geometry where that alpha geometry let the ray through at initial candidates sampling time. This should be unbiased? Maybe not actually. 
But is it that bad?\n    unsigned int visible_to_sample_point_alpha_test_random_seed = 42;\n\n    // TODO can be stored in outoging_radiance_to_first_hit?\n    float target_function = 0.0f;\n\n    // Whether or not the sample point is on a material that is rough enough to be reconnected\n    // If the sample point is on a mirror for example, reconnecting to that point from our center pixel\n    // is going to change the view direction of the mirror BSDF without changing the incident light\n    // direction of the mirror BSDF and that's not going to work\n    //\n    // Also, because we do not re-evaluate the BSDF at the sample point, this would lead to some brightening\n    // bias because this would be assuming that reconnecting to the mirror has non-zero energy, even with\n    // the new view direction which is incorrect\n    //\n    // Is the bias bad if not using this?\n    bool sample_point_rough_enough = false;\n\n    Octahedral24BitNormalPadded32b sample_point_geometric_normal;\n\n    HIPRT_HOST_DEVICE bool is_envmap_path() const {  return sample_point_primitive_index == -1; }\n};\n\nstruct ReSTIRGIReservoir\n{\n    HIPRT_HOST_DEVICE void add_one_candidate(ReSTIRGISample new_sample, float weight, Xorshift32Generator& random_number_generator)\n    {\n        M++;\n        weight_sum += weight;\n\n        if (random_number_generator() < weight / weight_sum)\n            sample = new_sample;\n    }\n\n    /**\n     * Combines 'other_reservoir' into this reservoir\n     * \n     * 'target_function' is the target function evaluated at the pixel that is doing the\n     *      resampling with the sample from the reservoir that we're combining (which is 'other_reservoir')\n     * \n     * 'jacobian_determinant' is the determinant of the jacobian. 
In ReSTIR DI, it is used\n     *      for converting the solid angle PDF (or UCW since the UCW is an estimate of the PDF)\n     *      with respect to the shading point of the reservoir we're resampling to the solid\n     *      angle PDF with respect to the shading point of 'this' reservoir\n     * \n     * 'random_number_generator' for generating the random number that will be used to stochastically\n     *      select the sample from 'other_reservoir' or not\n     */\n    HIPRT_HOST_DEVICE bool combine_with(const ReSTIRGIReservoir& other_reservoir, float mis_weight, float target_function, float jacobian_determinant, Xorshift32Generator& random_number_generator)\n    {\n        // Bullet point 4. of the intro of Section 5.2 of [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n        float reservoir_resampling_weight = mis_weight * target_function * other_reservoir.UCW * jacobian_determinant;\n\n        weight_sum += reservoir_resampling_weight;\n        M += other_reservoir.M;\n\n        if (random_number_generator() < reservoir_resampling_weight / weight_sum)\n        {\n            sample = other_reservoir.sample;\n            sample.target_function = target_function;\n\n            return true;\n        }\n\n        return false;\n    }\n\n    HIPRT_HOST_DEVICE void end()\n    {\n        if (weight_sum == 0.0f)\n            UCW = 0.0f;\n        else\n            UCW = 1.0f / sample.target_function * weight_sum;\n    }\n\n    HIPRT_HOST_DEVICE void end_with_normalization(float normalization_numerator, float normalization_denominator)\n    {\n        // Checking some limit values\n        if (weight_sum == 0.0f || weight_sum > 1.0e10f || normalization_denominator == 0.0f || normalization_numerator == 0.0f)\n            UCW = 0.0f;\n        else\n            UCW = 1.0f / sample.target_function * weight_sum * normalization_numerator / normalization_denominator;\n\n        // Hard limiting M to avoid explosions if the 
user decides not to use any M-cap (M-cap == 0)\n        M = hippt::min(M, 1000000);\n    }\n\n    HIPRT_HOST_DEVICE HIPRT_INLINE void sanity_check(int2 pixel_coords)\n    {\n#ifndef __KERNELCC__\n        if (M < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"Negative reservoir M value at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << M << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(weight_sum) || std::isinf(weight_sum))\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"NaN or inf reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (weight_sum < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"Negative reservoir weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::abs(weight_sum) < std::numeric_limits<float>::min() && weight_sum != 0.0f)\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"Denormalized weight_sum at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << weight_sum << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(UCW) || std::isinf(UCW))\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"NaN or inf reservoir UCW at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (UCW < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"Negative reservoir UCW at pixel (\" << 
pixel_coords.x << \", \" << pixel_coords.y << \"): \" << UCW << std::endl;\n            Utils::debugbreak();\n        }\n        else if (std::isnan(sample.target_function) || std::isinf(sample.target_function))\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"NaN or inf reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \")\" << std::endl;\n            Utils::debugbreak();\n        }\n        else if (sample.target_function < 0)\n        {\n            std::lock_guard<std::mutex> lock(restir_gi_log_mutex);\n            std::cerr << \"Negative reservoir sample.target_function at pixel (\" << pixel_coords.x << \", \" << pixel_coords.y << \"): \" << sample.target_function << std::endl;\n            Utils::debugbreak();\n        }\n#else\n        (void)pixel_coords;\n#endif\n    }\n\n    ReSTIRGISample sample;\n\n    int M = 0;\n    // TODO weight sum is never used at the same time as UCW so only one variable can be used for both to save space\n    float weight_sum = 0.0f;\n    // If the UCW is set to -1, this is because the reservoir was killed by visibility reuse\n    float UCW = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/GI/TargetFunction.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_GI_TARGET_FUNCTION_H\n#define DEVICE_RESTIR_GI_TARGET_FUNCTION_H\n\n#include \"Device/includes/LightSampling/Lights.h\"\n#include \"Device/includes/ReSTIR/Jacobian.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\ntemplate <bool withVisiblity, bool resamplingNeighbor = true>\nHIPRT_HOST_DEVICE float ReSTIR_GI_evaluate_target_function(const HIPRTRenderData& render_data, const ReSTIRGISample& sample, ReSTIRSurface& surface, Xorshift32Generator& random_number_generator)\n{\n\tfloat distance_to_sample_point;\n\tfloat3 incident_light_direction;\n\tif (sample.is_envmap_path())\n\t{\n\t\t// For envmap path, the direction is stored in the 'sample_point' value\n\t\tincident_light_direction = sample.sample_point;\n\t\tdistance_to_sample_point = 1.0e35f;\n\t}\n\telse\n\t{\n\t\t// Not an envmap path, the direction is the difference between the current shading\n\t\t// point and the reconnection point\n\t\tincident_light_direction = sample.sample_point - surface.shading_point;\n\t\tdistance_to_sample_point = hippt::length(incident_light_direction);\n\t\tif (distance_to_sample_point <= 1.0e-6f)\n\t\t\t// To avoid numerical instabilities\n\t\t\treturn 0.0f;\n\n\t\tincident_light_direction /= distance_to_sample_point;\n\t}\n\n\tfloat cosine_term = hippt::dot(incident_light_direction, surface.shading_normal);\n\tif (cosine_term <= 0.0f && sample.incident_light_info_at_visible_point != BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFRACT_LOBE)\n\t\treturn 0.0f;\n\telse if constexpr (resamplingNeighbor)\n\t{\n\t\t// If resampling a neighbor, the target function is going to evaluate to 0.0f if the sample point of the neighbor\n\t\t// is specular: that is because when resampling a neighbor, i.e. 
reconnecting to the sample point of the neighbor,\n\t\t// we're changing the view direction of the BSDF at the sample point.\n\t\t//\n\t\t// And changing the view direction of a specular BSDF without changing the incident light direction (which we are not\n\t\t// modifying) isn't going to adhere to the law of perfect reflection and so the contribution of the BSDF at the neighbor's\n\t\t// sample point will be 0.0f.\n\t\t//\n\t\t// So that's why we're returning 0.0f here\n\t\tif (render_data.render_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic && !sample.sample_point_rough_enough)\n\t\t\treturn 0.0f;\n\t}\n\n\tif constexpr (withVisiblity)\n\t{\n\t\thiprtRay visibility_ray;\n\t\tvisibility_ray.origin = surface.shading_point;\n\t\tvisibility_ray.direction = incident_light_direction;\n\n\t\tXorshift32Generator random_number_generator_alpha_test(sample.visible_to_sample_point_alpha_test_random_seed);\n\t\tbool sample_point_occluded = evaluate_shadow_ray_occluded(render_data, visibility_ray, distance_to_sample_point, surface.primitive_index, 0, random_number_generator_alpha_test);\n\t\tif (sample_point_occluded)\n\t\t\treturn 0.0f;\n\t}\n\n\tfloat bsdf_pdf;\n\tBSDFContext bsdf_context(surface.view_direction, surface.shading_normal, surface.geometric_normal, incident_light_direction, const_cast<BSDFIncidentLightInfo&>(sample.incident_light_info_at_visible_point), surface.ray_volume_state, false, surface.material, 0, 0.0f, MicrofacetRegularization::RegularizationMode::NO_REGULARIZATION);\n\tColorRGB32F visible_point_bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\tif (bsdf_pdf > 0.0f)\n\t\tvisible_point_bsdf_color *= hippt::abs(cosine_term);\n\n\treturn (visible_point_bsdf_color * sample.incoming_radiance_to_visible_point.unpack()).luminance();\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/Jacobian.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_JACOBIAN_H\n#define DEVICE_RESTIR_JACOBIAN_H\n\n#include \"Device/includes/LightSampling/LightUtils.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float get_jacobian_determinant_reconnection_shift(const float3& reconnection_point, const float3& reconnection_point_surface_normal, const float3& point_being_reconnected, const float3& vertex_before_reconnection_original_path, float jacobian_threshold)\n{\n\tfloat3 direction_to_reconnection_point_from_center = reconnection_point - point_being_reconnected;\n\tfloat3 direction_to_reconnection_point_from_neighbor = reconnection_point - vertex_before_reconnection_original_path;\n\tfloat distance_to_reconnection_point_from_center = hippt::length(direction_to_reconnection_point_from_center);\n\tfloat distance_to_reconnection_point_from_neighbor = hippt::length(direction_to_reconnection_point_from_neighbor);\n\tdirection_to_reconnection_point_from_center /= distance_to_reconnection_point_from_center;\n\tdirection_to_reconnection_point_from_neighbor /= distance_to_reconnection_point_from_neighbor;\n\n\tfloat cosine_at_reconnection_point_from_center = hippt::abs(hippt::dot(-direction_to_reconnection_point_from_center, reconnection_point_surface_normal));\n\tfloat cosine_at_reconnection_point_from_neighbor = hippt::abs(hippt::dot(-direction_to_reconnection_point_from_neighbor, reconnection_point_surface_normal));\n\n\tfloat cosine_ratio = cosine_at_reconnection_point_from_center / cosine_at_reconnection_point_from_neighbor;\n\tfloat distance_squared_ratio = (distance_to_reconnection_point_from_neighbor * distance_to_reconnection_point_from_neighbor) / (distance_to_reconnection_point_from_center * distance_to_reconnection_point_from_center);\n\n\tfloat jacobian_determinant = cosine_ratio * distance_squared_ratio;\n\n\tif 
(jacobian_determinant > jacobian_threshold || jacobian_determinant < 1.0f / jacobian_threshold || hippt::is_nan(jacobian_determinant) || hippt::is_inf(jacobian_determinant))\n\t\t// Samples are too dissimilar, returning 0 to indicate that we must reject the sample\n\t\treturn 0.0f;\n\telse\t\n\t\treturn jacobian_determinant;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/MISWeightsCommon.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_MIS_WEIGHTS_COMMON_H\n#define DEVICE_RESTIR_DI_MIS_WEIGHTS_COMMON_H\n\n/**\n * Forward declarations\n */\nstruct ReSTIRDISample;\nstruct ReSTIRGISample;\nstruct ReSTIRDIReservoir;\nstruct ReSTIRGIReservoir;\n\n /**\n * The ReSTIRTypeStruct is used to automatically determine what SampleType to use\n * based on the 'IsReSTIRGI' template parameter\n *\n * This allows us to use the ReSTIRDISample type of ReSTIRGISample type automatically\n * based on whether or not we're instantiating the structures for ReSTIR DI or ReSTIR GI\n *\n * This sample type is then used in some of the specialization to pass to the target functions\n */\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTypeStruct {};\n\ntemplate <>\nstruct ReSTIRTypeStruct<false>\n{\n\tusing SampleType = ReSTIRDISample;\n\tusing ReservoirType = ReSTIRDIReservoir;\n};\n\ntemplate <>\nstruct ReSTIRTypeStruct<true>\n{\n\tusing SampleType = ReSTIRGISample;\n\tusing ReservoirType = ReSTIRGIReservoir;\n};\n\ntemplate <bool IsReSTIRGI>\nusing ReSTIRSampleType = typename ReSTIRTypeStruct<IsReSTIRGI>::SampleType;\n\ntemplate <bool IsReSTIRGI>\nusing ReSTIRReservoirType = typename ReSTIRTypeStruct<IsReSTIRGI>::ReservoirType;\n\nHIPRT_DEVICE float symmetric_ratio_MIS_weights_difference_function(float target_function_at_center, float target_function_from_i, float exponent)\n{\n\tif (target_function_at_center == 0.0f || target_function_from_i == 0.0f)\n\t\treturn 0.0f;\n\n\tfloat ratio = hippt::min(target_function_at_center / target_function_from_i, target_function_from_i / target_function_at_center);\n\n\tif (exponent == 2.0f)\n\t\treturn hippt::square(ratio);\n\telse if (exponent == 3.0f)\n\t\treturn hippt::pow_3(ratio);\n\telse if (exponent == 4.0f)\n\t\treturn hippt::pow_4(ratio);\n\telse\n\t\treturn powf(ratio, exponent);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/NeighborSimilarity.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_NEIGHBOR_SIMILARITY_H\n#define DEVICE_RESTIR_NEIGHBOR_SIMILARITY_H\n \n#include \"Device/includes/ReSTIR/Jacobian.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n\n/**\n * Returns true if the two given points pass the plane distance check, false otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool plane_distance_heuristic(const ReSTIRCommonNeighborSimiliaritySettings& neighbor_similarity_settings, const float3& temporal_world_space_point, const float3& current_point, const float3& current_surface_normal, float plane_distance_threshold)\n{\n\tif (!neighbor_similarity_settings.use_plane_distance_heuristic)\n\t\treturn true;\n\n\tfloat3 direction_between_points = temporal_world_space_point - current_point;\n\tfloat distance_to_plane = hippt::abs(hippt::dot(direction_between_points, current_surface_normal));\n\n\treturn distance_to_plane < plane_distance_threshold;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE bool normal_similarity_heuristic(const ReSTIRCommonNeighborSimiliaritySettings& neighbor_similarity_settings, const float3& current_normal, const float3& neighbor_normal, float threshold)\n{\n\tif (!neighbor_similarity_settings.use_normal_similarity_heuristic)\n\t\treturn true;\n\n\treturn hippt::dot(current_normal, neighbor_normal) > threshold;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE bool roughness_similarity_heuristic(const ReSTIRCommonNeighborSimiliaritySettings& neighbor_similarity_settings, float neighbor_roughness, float center_pixel_roughness, float threshold)\n{\n\tif (!neighbor_similarity_settings.use_roughness_similarity_heuristic)\n\t\treturn true;\n\n\t// We don't want to temporally reuse on materials smoother than 0.075f because this\n\t// causes near-specular/glossy reflections to darken when camera ray jittering is used.\n\t// \n\t// This 
glossy reflections darkening only happens with confidence weights and \n\t// ray jittering but I'm not sure why. Probably because samples from one pixel (or sub-pixel location)\n\t// cannot efficiently be reused at another pixel (or sub-pixel location through jittering)\n\t// but confidence weights overweight these bad neighbor samples --> you end up using these\n\t// bad samples --> the shading loses in energy since we're now shading with samples that\n\t// don't align well with the glossy reflection direction\n\treturn hippt::abs(neighbor_roughness - center_pixel_roughness) < threshold;\n}\n\ntemplate <bool IsReSTIRGI>\nHIPRT_HOST_DEVICE HIPRT_INLINE bool check_neighbor_similarity_heuristics(const HIPRTRenderData& render_data,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t int neighbor_pixel_index, int center_pixel_index, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t const float3& current_shading_point, const float3& current_normal, bool previous_frame = false)\n{\n\tif (neighbor_pixel_index == center_pixel_index)\n\t\t// A pixel always passes the similarity test with itself\n\t\treturn true;\n\n\tif (previous_frame)\n\t{\n\t\tif (render_data.g_buffer_prev_frame.first_hit_prim_index[neighbor_pixel_index] == -1)\n\t\t\t// Cannot reuse from a neighbor that doesn't have a primary hit (direct miss into the envmap)\n\t\t\treturn false;\n\t}\n\telse\n\t{\n\t\tif (render_data.g_buffer.first_hit_prim_index[neighbor_pixel_index] == -1)\n\t\t\t// Cannot reuse from a neighbor that doesn't have a primary hit (direct miss into the envmap)\n\t\t\treturn false;\n\t}\n\n\tconst ReSTIRCommonNeighborSimiliaritySettings& neighbor_similarity_settings = ReSTIRSettingsHelper::get_restir_neighbor_similarity_settings<IsReSTIRGI>(render_data);\n\n\tfloat3 neighbor_world_space_point;\n\tfloat neighbor_roughness = 0.0f;\n\tfloat current_material_roughness = 0.0f;\n\n\tif (previous_frame)\n\t{\n\t\tif (neighbor_similarity_settings.use_plane_distance_heuristic)\n\t\t\t// Only getting the point plane 
distance heuristic, otherwise it's never used\n\t\t\tneighbor_world_space_point = render_data.g_buffer_prev_frame.primary_hit_position[neighbor_pixel_index];\n\n\t\tif (neighbor_similarity_settings.use_roughness_similarity_heuristic)\n\t\t\t// Only getting the roughness for the roughness heuristic otherwise it's not going to be used\n\t\t\tneighbor_roughness = render_data.g_buffer_prev_frame.materials[neighbor_pixel_index].get_roughness();\n\t}\n\telse\n\t{\n\t\tneighbor_world_space_point = render_data.g_buffer.primary_hit_position[neighbor_pixel_index];\n\t\tneighbor_roughness = render_data.g_buffer.materials[neighbor_pixel_index].get_roughness();\n\t}\n\n\tif (neighbor_similarity_settings.use_roughness_similarity_heuristic)\n\t\t// Getting the roughness at the current point\n\t\tcurrent_material_roughness = render_data.g_buffer.materials[center_pixel_index].get_roughness();\n\n\tfloat3 neighbor_normal = neighbor_similarity_settings.reject_using_geometric_normals ? render_data.g_buffer.geometric_normals[neighbor_pixel_index].unpack() : render_data.g_buffer.shading_normals[neighbor_pixel_index].unpack();\n\tbool plane_distance_passed = plane_distance_heuristic(neighbor_similarity_settings, neighbor_world_space_point, current_shading_point, current_normal, neighbor_similarity_settings.plane_distance_threshold);\n\tbool normal_similarity_passed = normal_similarity_heuristic(neighbor_similarity_settings, current_normal, neighbor_normal, neighbor_similarity_settings.normal_similarity_angle_precomp);\n\tbool roughness_similarity_passed = roughness_similarity_heuristic(neighbor_similarity_settings, neighbor_roughness, current_material_roughness, neighbor_similarity_settings.roughness_similarity_threshold);\n\n\treturn plane_distance_passed && normal_similarity_passed && roughness_similarity_passed;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/OptimalVisibilitySampling.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_OPTIMAL_VISIBILITY_SAMPLING_H\n#define DEVICE_RESTIR_OPTIMAL_VISIBILITY_SAMPLING_H\n\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\" // For the ReSTIRReservoirType\n#include \"Device/includes/ReSTIR/Utils.h\" // For the ReSTIRReservoirType\n\n#include \"HostDeviceCommon/KernelOptions/ReSTIRDIOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/ReSTIRGIOptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\ntemplate <bool IsReSTIRGI>\nHIPRT_DEVICE bool ReSTIR_optimal_visibility_sampling(HIPRTRenderData& render_data, \n\tReSTIRReservoirType<IsReSTIRGI>& spatial_reuse_output_reservoir, \n\tconst ReSTIRReservoirType<IsReSTIRGI>& center_pixel_reservoir, ReSTIRSurface& center_pixel_surface, \n\tint neighbor_index, int reused_neighbors_count, \n\tXorshift32Generator& random_number_generator)\n{\n#if ReSTIR_DI_DoOptimalVisibilitySampling == KERNEL_OPTION_TRUE || ReSTIR_GI_DoOptimalVisibilitySampling == KERNEL_OPTION_TRUE\n\tbool at_least_one_neighbor_resampled = spatial_reuse_output_reservoir.weight_sum > 0.0f;\n\tbool last_neighbor_before_canonical = neighbor_index == reused_neighbors_count - 1;\n\tconstexpr bool ovs_enabled = (!IsReSTIRGI && ReSTIR_DI_DoOptimalVisibilitySampling == KERNEL_OPTION_TRUE) || (IsReSTIRGI && ReSTIR_GI_DoOptimalVisibilitySampling == KERNEL_OPTION_TRUE);\n\tif (at_least_one_neighbor_resampled && last_neighbor_before_canonical && ovs_enabled)\n\t{\n\t\t// If the spatial neighbors resampled up until now are occluded, they will be discarded by this\n\t\t// visiblity test and so the canonical sample will be the resulting reservoir\n\n\t\tbool reservoir_killed;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treservoir_killed = ReSTIR_GI_visibility_validation(render_data, spatial_reuse_output_reservoir, center_pixel_surface.shading_point, 
center_pixel_surface.last_hit_primitive_index, random_number_generator);\n\t\telse\n\t\t\treservoir_killed = ReSTIR_DI_visibility_test_kill_reservoir(render_data, spatial_reuse_output_reservoir, center_pixel_surface.shading_point, center_pixel_surface.last_hit_primitive_index, random_number_generator);\n\n\t\tif (reservoir_killed)\n\t\t\tspatial_reuse_output_reservoir.weight_sum = 0.0f;\n\n\t\treturn reservoir_killed;\n\t}\n#endif\n\n\treturn false;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/FinalShading.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDE_REGIR_FINAL_SHADING_H\n#define DEVICE_INCLUDE_REGIR_FINAL_SHADING_H\n\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_DEVICE HIPRT_INLINE ColorRGB32F sample_one_light_ReGIR(HIPRTRenderData& render_data, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    if (!MaterialUtils::can_do_light_sampling(ray_payload.material))\n        return ColorRGB32F(0.0f);\n\n    bool point_outside_grid = false;\n\n\tReGIRShadingAdditionalInfo additional_infos;\n    LightSampleInformation light_sample = sample_one_emissive_triangle_regir_with_info(render_data,\n        closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal,\n        closest_hit_info.primitive_index, ray_payload, \n        point_outside_grid, random_number_generator, additional_infos);\n\n    if (!point_outside_grid)\n    {\n        if (light_sample.area_measure_pdf <= 0.0f)\n            // Can happen for very small triangles\n            return ColorRGB32F(0.0f);\n\n#if ReGIR_ShadingResamplingShadeAllSamples == KERNEL_OPTION_TRUE\n        // If we're shading all samples, we already have the perfectly computed\n        // radiance in additional_infos so we can just return that\n        return additional_infos.sample_radiance;\n#endif\n        // ReGIR succeeded with sampling, just shooting a shadow ray to validate visibility\n\n        float3 shadow_ray_origin = closest_hit_info.inter_point;\n        float3 shadow_ray_direction = light_sample.point_on_light - shadow_ray_origin;\n        float distance_to_light = hippt::length(shadow_ray_direction);\n        float3 shadow_ray_direction_normalized = 
shadow_ray_direction / distance_to_light;\n    \n        hiprtRay shadow_ray;\n        shadow_ray.origin = shadow_ray_origin;\n        shadow_ray.direction = shadow_ray_direction_normalized;\n    \n        // NEE++ context for the shadow ray\n        NEEPlusPlusContext nee_plus_plus_context;\n        nee_plus_plus_context.point_on_light = light_sample.point_on_light;\n        nee_plus_plus_context.shaded_point = shadow_ray_origin;\n\n        bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n\n        if (!in_shadow)\n            return additional_infos.sample_radiance / light_sample.area_measure_pdf / nee_plus_plus_context.unoccluded_probability;\n        else\n            return ColorRGB32F(0.0f);\n    }\n    else\n    {\n#if ReGIR_DebugMode == REGIR_DEBUG_MODE_SAMPLING_FALLBACK\n        return ColorRGB32F(1.0e10f, 0.0f, 1.0e10f);\n#endif\n\n#if ReGIR_FallbackLightSamplingStrategy == LSS_BASE_REGIR\n        // Invalid fallback strategy\n        invalid ReGIR light sampling fallback strategy\n#endif\n        \n        // Fallback method as the point was outside of the ReGIR grid\n        light_sample = sample_one_emissive_triangle<ReGIR_FallbackLightSamplingStrategy>(render_data,\n            closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal,\n            closest_hit_info.primitive_index, ray_payload,\n            random_number_generator);\n         \n        float3 shadow_ray_origin = closest_hit_info.inter_point;\n        float3 shadow_ray_direction = light_sample.point_on_light - shadow_ray_origin;\n        float distance_to_light = hippt::length(shadow_ray_direction);\n        float3 shadow_ray_direction_normalized = shadow_ray_direction / distance_to_light;\n\n        hiprtRay shadow_ray;\n        shadow_ray.origin = shadow_ray_origin;\n        
shadow_ray.direction = shadow_ray_direction_normalized;\n\n        // NEE++ context for the shadow ray\n        NEEPlusPlusContext nee_plus_plus_context;\n        nee_plus_plus_context.point_on_light = light_sample.point_on_light;\n        nee_plus_plus_context.shaded_point = shadow_ray_origin;\n\n        ColorRGB32F light_source_radiance;\n        // abs() here to allow backfacing light sources\n        float dot_light_source = compute_cosine_term_at_light_source(light_sample.light_source_normal, -shadow_ray.direction);\n\n        if (dot_light_source > 0.0f)\n        {\n            bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n\n            if (!in_shadow)\n            {\n                float bsdf_pdf;\n\n                BSDFIncidentLightInfo incident_light_info = light_sample.incident_light_info;\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE && DirectLightSamplingBaseStrategy == LSS_BASE_REGIR\n                BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray.direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n#else\n                BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, shadow_ray.direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n#endif\n                ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, random_number_generator);\n\n                if (bsdf_pdf != 0.0f)\n                {\n                    // 
Conversion to solid angle from surface area measure\n                    float light_sample_solid_angle_pdf = area_to_solid_angle_pdf(light_sample.area_measure_pdf, distance_to_light, dot_light_source);\n                    if (light_sample_solid_angle_pdf > 0.0f)\n                    {\n                        float cosine_term = hippt::abs(hippt::dot(closest_hit_info.shading_normal, shadow_ray.direction));\n                        light_source_radiance = light_sample.emission * cosine_term * bsdf_color / light_sample_solid_angle_pdf / nee_plus_plus_context.unoccluded_probability;\n\n                        // Just a CPU-only sanity check\n                        sanity_check</* CPUOnly */ true>(render_data, light_source_radiance, 0, 0);\n\n                        return light_source_radiance;\n                    }\n                }\n            }\n        }\n    }\n\n    return ColorRGB32F(0.0f);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/GridFillSurface.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_GRID_FILL_SURFACE_H\n#define DEVICE_INCLUDES_REGIR_GRID_FILL_SURFACE_H\n\nstruct ReGIRGridFillSurface\n{\n\tint cell_primitive_index = -1;\n\tfloat3 cell_point = make_float3(0.0f, 0.0f, 0.0f);\n\tfloat3 cell_normal = make_float3(0.0f, 0.0f, 0.0f);\n\tfloat cell_roughness = -1.0f;\n\tfloat cell_metallic = -1.0f;\n\tfloat cell_specular = -1.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/HashGridCellData.h",
    "content": "/*\n* Copyright 2025 Tom Clabault. GNU GPL3 license.\n* GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n*/\n\n#ifndef DEVICE_KERNELS_REGIR_HASH_GRID_CELL_DATA_H\n#define DEVICE_KERNELS_REGIR_HASH_GRID_CELL_DATA_H\n\n#include \"HostDeviceCommon/Packing.h\"\n\nstruct ReGIRHashCellDataSoADevice\n{\n\tstatic constexpr float UNDEFINED_DISTANCE = -42.0f;\n\n\tstatic constexpr unsigned int UNDEFINED_POINT = 0xFFFFFFFF;\n\tstatic constexpr float3 UNDEFINED_NORMAL = { 0.0f, 0.0f, 0.0f };\n\tstatic constexpr int UNDEFINED_PRIMITIVE = -1;\n\n\t// These three buffers are only allocated per each cell, not per each reservoir so they are\n\t// 'number_cells' in size\n\n\t// Buffer that holds the index of the thread that inserted into that grid cell\n\tAtomicType<int>* hit_primitive = nullptr;\n\tfloat3* world_points = nullptr;\n\tOctahedral24BitNormalPadded32b* world_normals = nullptr;\n\n\t// TODO these guys in a single buffer to have only one memory access\n\tunsigned char* roughness = nullptr;\n\tunsigned char* specular = nullptr;\n\tunsigned char* metallic = nullptr;\n\t// The checksum for each entry of the table to check for collisions\n\tAtomicType<unsigned int>* checksums = nullptr;\n\n\t// The staging buffer is used to store the grid cells that are alive during shading: for each grid cell that a ray falls into during shading,\n\t// we position the unsigned char to 1\n\t//\n\t// We need a staging buffer to do that because modifying the 'grid_cell_alive' buffer directly would be a race condition since other threads\n\t// may be reading from that buffer at the same time to see if a cell is alive or not\n\t//\n\t// That staging buffer is then copied to the 'grid_cell_alive' buffer at the end of the frame\n\tAtomicType<unsigned int>* grid_cell_alive = nullptr;\n\tunsigned int* grid_cells_alive_list = nullptr;\n\n\tAtomicType<unsigned int>* grid_cells_alive_count = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/HashGridSoADevice.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_HASH_GRID_SOA_DEVICE_H\n#define DEVICE_INCLUDES_REGIR_HASH_GRID_SOA_DEVICE_H\n\n#include \"Device/includes/ReSTIR/ReGIR/ReservoirSoA.h\"\n\nstruct ReGIRHashGridSoADevice\n{\n\t// These two SoAs are allocated to hold 'number_cells * number_reservoirs_per_cell'\n\t// So for a given 'hash_grid_cell_index', the cell contains reservoirs and samples going from \n\t// reservoirs[hash_grid_cell_index * number_reservoirs_per_cell] to reservoirs[cell_index * number_reservoirs_per_cell + number_reservoirs_per_cell[\n\tReGIRReservoirSoADevice reservoirs;\n\tReGIRSampleSoADevice samples;\n\n\tunsigned int m_total_number_of_cells = 0;\n};\n\n#endif // DEVICE_INCLUDES_REGIR_HASH_GRID_SOA_DEVICE_H\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/PresampledLight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_PRESAMPLED_LIGHT_H\n#define DEVICE_KERNELS_REGIR_PRESAMPLED_LIGHT_H\n\n#include \"HostDeviceCommon/Packing.h\"\n\nstruct ReGIRPresampledLight\n{\n\t// Index in the whole scene of the triangle sampled \n\tint emissive_triangle_index = -1;\n\n\t// Area of the sampled triangle\n\tfloat triangle_area = 0.0f;\n\n\t// Point sampled on the light\n\tfloat3 point_on_light = make_float3(0.0f, 0.0f, 0.0f);\n\n\t// Packed normal of the sampled emissive triangle\n\tOctahedral24BitNormalPadded32b normal;\n\n\t// Emission strength of the triangle\n\tColorRGB32F emission;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/ReGIRHashGrid.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_HASH_GRID_H\n#define DEVICE_INCLUDES_REGIR_HASH_GRID_H\n\n#include \"Device/includes/HashGrid.h\"\n#include \"Device/includes/HashGridHash.h\"\n#include \"Device/includes/ReSTIR/ReGIR/HashGridCellData.h\"\n#include \"Device/includes/ReSTIR/ReGIR/HashGridSoADevice.h\"\n#include \"Device/includes/ReSTIR/ReGIR/ShadingSettings.h\"\n#include \"Device/includes/ReSTIR/ReGIR/ReservoirSoA.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/ReGIROptions.h\"\n\nstruct ReGIRHashGrid\n{\n\tHIPRT_DEVICE static float compute_adaptive_cell_size_roughness(float3 world_position, const HIPRTCamera& current_camera, float roughness, bool primary_hit, float target_projected_size, float grid_cell_min_size)\n\t{\n\t\tint width = current_camera.sensor_width;\n\t\tint height = current_camera.sensor_height;\n\t\t\n#if ReGIR_HashGridAdaptiveRoughnessGridPrecision == KERNEL_OPTION_TRUE && (BSDFOverride != BSDF_LAMBERTIAN && BSDFOverride != BSDF_OREN_NAYAR)\n\t\tif (primary_hit)\n\t\t{\n\t\t\t// Only increasing the resolution for the primary hit cells where\n\t\t\t// we can actually use that resolution for resampling according to the BSDF\n\t\t\t//\n\t\t\t// For secondary grid cells, we cannot resample according to the BSDF because\n\t\t\t// we do not have the view direction so there's no point increasing the resolution.\n\n\t\t\tif (roughness >= 0.08f && roughness < 0.2f)\n\t\t\t{\n\t\t\t\tfloat t = hippt::inverse_lerp(roughness, 0.08f, 0.2f);\n\t\t\t\tfloat res_increase_factor = hippt::lerp(2.0f, 5.0f, 1.0f - t);\n\n\t\t\t\ttarget_projected_size /= res_increase_factor;\n\t\t\t\tgrid_cell_min_size /= res_increase_factor;\n\t\t\t}\n\t\t\telse if (roughness >= 0.2f && roughness < 0.35f)\n\t\t\t{\n\t\t\t\tfloat t = 
hippt::inverse_lerp(roughness, 0.2f, 0.35f);\n\t\t\t\tfloat res_increase_factor = hippt::lerp(1.0f, 2.0f, 1.0f - t);\n\n\t\t\t\ttarget_projected_size /= res_increase_factor;\n\t\t\t\tgrid_cell_min_size /= res_increase_factor;\n\t\t\t}\n\t\t}\n#endif\n\n#if ReGIR_HashGridConstantGridCellSize == KERNEL_OPTION_TRUE\n\t\treturn grid_cell_min_size;\n#else\n\t\tfloat cell_size_step = hippt::length(world_position - current_camera.position) * tanf(target_projected_size * current_camera.vertical_fov * hippt::max(1.0f / height, (float)height / hippt::square(width)));\n\t\tfloat log_step = floorf(log2f(cell_size_step / grid_cell_min_size));\n\n\t\treturn hippt::max(grid_cell_min_size, grid_cell_min_size * exp2f(log_step));\n#endif\n\t}\n\n\tHIPRT_DEVICE unsigned int custom_regir_hash(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, unsigned int total_number_of_cells, unsigned int& out_checksum) const\n\t{\n\t\tfloat cell_size = ReGIRHashGrid::compute_adaptive_cell_size_roughness(world_position, current_camera, roughness, primary_hit, m_grid_cell_target_projected_size, m_grid_cell_min_size);\n\n\t\t// Reference: SIGGRAPH 2022 - Advances in Spatial Hashing\n\t\tworld_position = hash_periodic_shifting(world_position, cell_size);\n\n\t\tunsigned int grid_coord_x = static_cast<int>(floorf(world_position.x / cell_size));\n\t\tunsigned int grid_coord_y = static_cast<int>(floorf(world_position.y / cell_size));\n\t\tunsigned int grid_coord_z = static_cast<int>(floorf(world_position.z / cell_size));\n\n\t\t// Using two hash functions as proposed in [WORLD-SPACE SPATIOTEMPORAL RESERVOIR REUSE FOR RAY-TRACED GLOBAL ILLUMINATION, Boisse, 2021]\n#if ReGIR_HashGridHashSurfaceNormal == KERNEL_OPTION_TRUE\n\t\t// And adding normal hasing from [World-Space Spatiotemporal Path Resampling for Path Tracing, 2023]\n\t\tunsigned int quantized_normal = hash_quantize_normal(surface_normal, primary_hit ? 
ReGIR_HashGridHashSurfaceNormalResolutionPrimaryHits : ReGIR_HashGridHashSurfaceNormalResolutionSecondaryHits);\n\t\tunsigned int checksum = h2_xxhash32(quantized_normal + h2_xxhash32(cell_size + h2_xxhash32(grid_coord_z + h2_xxhash32(grid_coord_y + h2_xxhash32(grid_coord_x)))));\n\t\tunsigned int cell_hash = h1_pcg(quantized_normal + h1_pcg(cell_size + h1_pcg(grid_coord_z + h1_pcg(grid_coord_y + h1_pcg(grid_coord_x))))) % total_number_of_cells;\n#else\n\t\tunsigned int checksum = h2_xxhash32(cell_size + h2_xxhash32(grid_coord_z + h2_xxhash32(grid_coord_y + h2_xxhash32(grid_coord_x))));\n\t\tunsigned int cell_hash = h1_pcg(cell_size + h1_pcg(grid_coord_z + h1_pcg(grid_coord_y + h1_pcg(grid_coord_x)))) % total_number_of_cells;\n#endif\n\n\t\tout_checksum = checksum;\n\t\treturn cell_hash;\n\t}\n\n\tHIPRT_DEVICE void reset_reservoir(ReGIRHashGridSoADevice& soa, unsigned int hash_grid_cell_index, unsigned int reservoir_index_in_cell)\n\t{\n\t\tint reservoir_index_in_grid = hash_grid_cell_index * soa.reservoirs.number_of_reservoirs_per_cell + reservoir_index_in_cell;\n\n\t\tsoa.reservoirs.store_reservoir_opt(reservoir_index_in_grid, ReGIRReservoir());\n\t\tsoa.samples.store_sample(reservoir_index_in_grid, ReGIRReservoir().sample);\n\t}\n\n\t/**\n\t * Overload if you already the hash grid cell index\n\t */\n\tHIPRT_DEVICE void store_reservoir_and_sample_opt(const ReGIRReservoir& reservoir, ReGIRHashGridSoADevice& soa, unsigned int hash_grid_cell_index, int reservoir_index_in_cell)\n\t{\n\t\tint reservoir_index_in_grid = hash_grid_cell_index * soa.reservoirs.number_of_reservoirs_per_cell + reservoir_index_in_cell;\n\n\t\tstore_full_reservoir(soa, reservoir, reservoir_index_in_grid);\n\t}\n\n\tHIPRT_DEVICE void store_reservoir_and_sample_opt(const ReGIRReservoir& reservoir, ReGIRHashGridSoADevice& soa, ReGIRHashCellDataSoADevice& hash_cell_data, \n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int 
reservoir_index_in_cell)\n\t{\n\t\tunsigned int hash_key;\n\t\tunsigned int hash_grid_cell_index = custom_regir_hash(world_position, surface_normal, current_camera, roughness, primary_hit, soa.m_total_number_of_cells, hash_key);\n\t\tif (!HashGrid::resolve_collision<ReGIR_HashGridCollisionResolutionMaxSteps>(hash_cell_data.checksums, soa.m_total_number_of_cells, hash_grid_cell_index, hash_key))\n\t\t\treturn;\n\n\t\tstore_reservoir_and_sample_opt(reservoir, soa, hash_grid_cell_index, reservoir_index_in_cell);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_hash_grid_cell_index(const ReGIRHashGridSoADevice& soa, const ReGIRHashCellDataSoADevice& hash_cell_data, \n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit) const\n\t{\n\t\tunsigned int hash_key;\n\t\tunsigned int hash_grid_cell_index = custom_regir_hash(world_position, surface_normal, current_camera, roughness, primary_hit, soa.m_total_number_of_cells, hash_key);\n\t\tunsigned int original = hash_grid_cell_index;\n\t\tif (!HashGrid::resolve_collision<ReGIR_HashGridCollisionResolutionMaxSteps>(hash_cell_data.checksums, soa.m_total_number_of_cells, hash_grid_cell_index, hash_key) || hash_cell_data.grid_cell_alive[hash_grid_cell_index] == 0u)\n\t\t\treturn HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\n\t\treturn hash_grid_cell_index;\n\t}\n\n\t/**\n\t * Overload if you already the hash grid cell index\n\t */\n\tHIPRT_DEVICE unsigned int get_reservoir_index_in_grid(const ReGIRHashGridSoADevice& soa, unsigned int hash_grid_cell_index, int reservoir_index_in_cell) const\n\t{\n\t\treturn hash_grid_cell_index * soa.reservoirs.number_of_reservoirs_per_cell + reservoir_index_in_cell;\n\t}\n\n\tHIPRT_DEVICE unsigned int get_reservoir_index_in_grid(const ReGIRHashGridSoADevice& soa, const ReGIRHashCellDataSoADevice& hash_cell_data, \n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int 
reservoir_index_in_cell) const\n\t{\n\t\tunsigned int hash_grid_cell_index = get_hash_grid_cell_index(soa, hash_cell_data, world_position, surface_normal, current_camera, roughness, primary_hit);\n\t\tif (hash_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\treturn HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\n\t\treturn get_reservoir_index_in_grid(soa, hash_grid_cell_index, reservoir_index_in_cell);\n\t}\n\n\tHIPRT_DEVICE void store_full_reservoir(ReGIRHashGridSoADevice& soa, const ReGIRReservoir& reservoir, int reservoir_index_in_grid)\n\t{\n\t\tif (reservoir.UCW <= 0.0f)\n\t\t{\n\t\t\tsoa.reservoirs.UCW[reservoir_index_in_grid] = reservoir.UCW;\n\t\t\t\n\t\t\t// No need to store the rest if the UCW is invalid, we can already return\n\t\t\treturn;\n\t\t}\n\n\t\tsoa.reservoirs.store_reservoir_opt(reservoir_index_in_grid, reservoir);\n\t\tsoa.samples.store_sample(reservoir_index_in_grid, reservoir.sample);\n\t}\n\n\tHIPRT_DEVICE ReGIRReservoir read_full_reservoir(const ReGIRHashGridSoADevice& soa, unsigned int reservoir_index_in_grid) const\n\t{\n\t\tif (reservoir_index_in_grid == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\treturn ReGIRReservoir();\n\n\t\tReGIRReservoir reservoir;\n\n\t\tfloat UCW = soa.reservoirs.UCW[reservoir_index_in_grid];\n\t\tif (UCW <= 0.0f)\n\t\t{\n\t\t\t// If the reservoir doesn't have a valid sample, not even reading the rest of it\n\t\t\tReGIRReservoir out;\n\t\t\tout.UCW = UCW;\n\n\t\t\treturn out;\n\t\t}\n\n\t\treservoir = soa.reservoirs.read_reservoir<false>(reservoir_index_in_grid);\n\t\treservoir.UCW = UCW;\n\t\treservoir.sample = soa.samples.read_sample(reservoir_index_in_grid);\n\n\t\treturn reservoir;\n\t}\n\n\t/**\n\t * Override if you already have the hash grid cell index\n\t */\n\tHIPRT_DEVICE ReGIRReservoir read_full_reservoir(const ReGIRHashGridSoADevice& soa, unsigned int hash_grid_cell_index, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tunsigned int 
reservoir_index_in_grid = get_reservoir_index_in_grid(soa, hash_grid_cell_index, reservoir_index_in_cell);\n\n\t\tif (out_invalid_sample)\n\t\t{\n\t\t\tif (reservoir_index_in_grid == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t\t*out_invalid_sample = true;\n\t\t\telse\n\t\t\t\t*out_invalid_sample = false;\n\t\t}\n\n\t\treturn read_full_reservoir(soa, reservoir_index_in_grid);\n\t}\n\n\tHIPRT_DEVICE ReGIRReservoir read_full_reservoir(const ReGIRHashGridSoADevice& soa, const ReGIRHashCellDataSoADevice& hash_cell_data,\n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tunsigned int reservoir_index_in_grid = get_reservoir_index_in_grid(soa, hash_cell_data, world_position, surface_normal, current_camera, roughness, primary_hit, reservoir_index_in_cell);\n\n\t\tif (out_invalid_sample)\n\t\t{\n\t\t\tif (reservoir_index_in_grid == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t\t*out_invalid_sample = true;\n\t\t\telse\n\t\t\t\t*out_invalid_sample = false;\n\t\t}\n\n\t\treturn read_full_reservoir(soa, reservoir_index_in_grid);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_hash_grid_cell_index_from_world_pos(const ReGIRHashGridSoADevice& soa, const ReGIRHashCellDataSoADevice& hash_cell_data, \n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit) const\n\t{\n\t\tunsigned int hash_key;\n\t\tunsigned int hash_grid_cell_index = custom_regir_hash(world_position, surface_normal, current_camera, roughness, primary_hit, soa.m_total_number_of_cells, hash_key);\n\n\t\tif (!HashGrid::resolve_collision<ReGIR_HashGridCollisionResolutionMaxSteps>(hash_cell_data.checksums, soa.m_total_number_of_cells, hash_grid_cell_index, hash_key))\n\t\t\treturn HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\t\telse\n\t\t\treturn hash_grid_cell_index;\n\t}\n\n\tHIPRT_DEVICE float3 
jitter_world_position(float3 original_world_position, const HIPRTCamera& current_camera, float roughness, bool primary_hit, Xorshift32Generator& rng, float jittering_radius = 0.5f) const\n\t{\n\t\tfloat3 random_offset = make_float3(rng(), rng(), rng()) * 2.0f - make_float3(1.0f, 1.0f, 1.0f);\n\n\t\treturn original_world_position + random_offset * ReGIRHashGrid::compute_adaptive_cell_size_roughness(original_world_position, current_camera, roughness, primary_hit, m_grid_cell_target_projected_size, m_grid_cell_min_size) * jittering_radius;\n\t}\n\n\tHashGrid m_hash_grid;\n\n\tfloat m_grid_cell_min_size = ReGIR_HashGridConstantGridCellSize ? 0.75f : 0.25f;\n\tfloat m_grid_cell_target_projected_size = 10.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/Representative.h",
    "content": "/*\n* Copyright 2025 Tom Clabault. GNU GPL3 license.\n* GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n*/\n\n#ifndef DEVICE_KERNELS_REGIR_REPRESENTATIVE_H\n#define DEVICE_KERNELS_REGIR_REPRESENTATIVE_H\n \n#include \"Device/includes/ReSTIR/ReGIR/GridFillSurface.h\"\n#include \"Device/includes/ReSTIR/ReGIR/HashGridCellData.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 ReGIR_get_cell_world_normal(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).world_normals[hash_grid_cell_index].unpack();\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 ReGIR_get_cell_world_point(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).world_points[hash_grid_cell_index];\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE int ReGIR_get_cell_primitive_index(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).hit_primitive[hash_grid_cell_index];\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float ReGIR_get_cell_roughness(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\t// / 255.0f to convert from uchar [0, 255] to float [0, 1]\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).roughness[hash_grid_cell_index] / 255.0f;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float ReGIR_get_cell_metallic(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\t// / 255.0f to convert from uchar [0, 255] to float [0, 1]\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).metallic[hash_grid_cell_index] / 255.0f;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float 
ReGIR_get_cell_specular(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\t// / 255.0f to convert from uchar [0, 255] to float [0, 1]\n\treturn render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).specular[hash_grid_cell_index] / 255.0f;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReGIRGridFillSurface ReGIR_get_cell_surface(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit)\n{\n\tint cell_primitive_index = ReGIR_get_cell_primitive_index(render_data, hash_grid_cell_index, primary_hit);\n\tfloat3 cell_point = ReGIR_get_cell_world_point(render_data, hash_grid_cell_index, primary_hit);\n\tfloat3 cell_normal = ReGIR_get_cell_world_normal(render_data, hash_grid_cell_index, primary_hit);\n\tfloat cell_roughness = ReGIR_get_cell_roughness(render_data, hash_grid_cell_index, primary_hit);\n\tfloat cell_metallic = ReGIR_get_cell_metallic(render_data, hash_grid_cell_index, primary_hit);\n\tfloat cell_specular = ReGIR_get_cell_specular(render_data, hash_grid_cell_index, primary_hit);\n\n\tReGIRGridFillSurface surface;\n\tsurface.cell_primitive_index = cell_primitive_index;\n\tsurface.cell_point = cell_point;\n\tsurface.cell_normal = cell_normal;\n\tsurface.cell_roughness = cell_roughness;\n\tsurface.cell_metallic = cell_metallic;\n\tsurface.cell_specular = cell_specular;\n\n\treturn surface;\n}\n\n/**\n *\tUpdates the representative point and normal (and other data) of the cell at the given shading point\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void ReGIR_update_representative_data(HIPRTRenderData& render_data, float3 shading_point, float3 surface_normal, const HIPRTCamera& current_camera, int primitive_index, bool primary_hit, const DeviceUnpackedEffectiveMaterial& material)\n{\n\tif (DirectLightSamplingBaseStrategy != LSS_BASE_REGIR)\n\t\treturn;\n\telse if (primitive_index == -1)\n\t\treturn;\n\n\t// We're using the packed-unpacked surface normal here because\n\t// packing/unpacking (as used in 
the G-Buffer) normals introduces\n\t// small differences that are enough to shift us from one cell to\n\t// another.\n\t//\n\t// In practice this leads to the cell being inserted into the hash grid\n\t// with non-packed normals but then when the cell is queried for the first time\n\t// during the path tracing kernels (and thus with the packed + unpacked normal\n\t// from the G-Buffer we get different hashing results and we can't find our cells\n\t// back)\n\tsurface_normal = Octahedral24BitNormalPadded32b(surface_normal).unpack();\n\n\trender_data.render_settings.regir_settings.insert_hash_cell_data(shading_point, surface_normal, current_camera, primary_hit, primitive_index, material);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/Reservoir.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_RESERVOIR_H\n#define DEVICE_KERNELS_REGIR_RESERVOIR_H\n\n#include \"HostDeviceCommon/Xorshift.h\"\n#include \"HostDeviceCommon/LightSampleInformation.h\"\n#include \"HostDeviceCommon/Packing.h\"\n\nstruct ReGIRSample\n{\n\tint emissive_triangle_index = -1; // Only needed for ReSTIR DI\n\tfloat3 point_on_light;\n\n\t// Note: the target function isn't stored in the sample SoA, it's just there during the sampling process\n\tfloat target_function = 0.0f;\n};\n\nstruct ReGIRReservoir\n{\n\tstatic constexpr float VISIBILITY_REUSE_KILLED_UCW = -42.0f;\n\tstatic constexpr float UNDEFINED_UCW = -4242.0f;\n\t\n\tHIPRT_DEVICE bool stream_sample_raw(float mis_weight, float target_function, float source_pdf, int emissive_triangle_index, float3 point_on_light, Xorshift32Generator& rng)\n\t{\n\t\tfloat resampling_weight = mis_weight * target_function / source_pdf;\n\n\t\tweight_sum += resampling_weight;\n\n\t\tif (rng() < resampling_weight / weight_sum)\n\t\t{\n\t\t\tsample.emissive_triangle_index = emissive_triangle_index;\n\t\t\tsample.point_on_light = point_on_light;\n\n\t\t\tsample.target_function = target_function;\n\n\t\t\treturn true;\n\t\t}\n\n\t\treturn false;\n\t}\n\n\tHIPRT_DEVICE bool stream_sample(float mis_weight, float target_function, float source_pdf, const LightSampleInformation& light_sample, Xorshift32Generator& rng)\n\t{\n\t\treturn stream_sample_raw(mis_weight, target_function, source_pdf, light_sample.emissive_triangle_index, light_sample.point_on_light, rng);\n\t}\n\n\tHIPRT_DEVICE bool stream_reservoir(float mis_weight, float target_function, const ReGIRReservoir& other_reservoir, Xorshift32Generator& rng)\n\t{\n\t\tfloat resampling_weight = mis_weight * target_function * other_reservoir.UCW;\n\n\t\tif (resampling_weight <= 0.0f)\n\t\t\treturn false;\n\n\t\tweight_sum += 
resampling_weight;\n\n\t\tif (rng() < resampling_weight / weight_sum)\n\t\t{\n\t\t\tsample = other_reservoir.sample;\n\t\t\tsample.target_function = target_function;\n\n\t\t\treturn true;\n\t\t}\n\n\t\treturn false;\n\t}\n\n\tHIPRT_DEVICE void finalize_resampling(float normalization_numerator, float normalization_denominator)\n\t{\n\t\tif (weight_sum <= 0.0f || normalization_denominator == 0.0f)\n\t\t\tUCW = 0.0f;\n\t\telse\n\t\t\tUCW = 1.0f / sample.target_function * weight_sum * normalization_numerator / normalization_denominator;\n\t}\n\n\tReGIRSample sample;\n\n\tfloat weight_sum = 0.0f;\n\t// If the UCW is set to VISIBILITY_REUSE_KILLED_UCW, this is because the reservoir was killed by visibility reuse\n\tfloat UCW = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/ReservoirSoA.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_RESERVOIR_SOA_H\n#define DEVICE_KERNELS_REGIR_RESERVOIR_SOA_H\n\n#include \"Device/includes/ReSTIR/ReGIR/Reservoir.h\"\n\nstruct ReGIRSampleSoADevice\n{\n\tHIPRT_HOST_DEVICE void store_sample(int linear_reservoir_index, const ReGIRSample& sample)\n\t{\n\t\temissive_triangle_index[linear_reservoir_index] = sample.emissive_triangle_index;\n\t\t// random_seed[linear_reservoir_index] = sample.random_seed;\n\t\tpoint_on_light[linear_reservoir_index] = sample.point_on_light;\n\t}\n\n\tHIPRT_HOST_DEVICE ReGIRSample read_sample(int linear_reservoir_index) const\n\t{\n\t\tReGIRSample sample;\n\n\t\tsample.emissive_triangle_index = emissive_triangle_index[linear_reservoir_index];\n\t\t// sample.random_seed = random_seed[linear_reservoir_index];\n\t\tsample.point_on_light = point_on_light[linear_reservoir_index];\n\n\t\treturn sample;\n\t}\n\n\tint* emissive_triangle_index = nullptr;\n\t// Random seed for generating the point on the light\n\t// unsigned int* random_seed = nullptr;\n\tfloat3* point_on_light = nullptr;\n};\n\nstruct ReGIRReservoirSoADevice\n{\n\tHIPRT_HOST_DEVICE void store_reservoir_opt(int linear_reservoir_index, const ReGIRReservoir& reservoir)\n\t{\n\t\tUCW[linear_reservoir_index] = reservoir.UCW;\n\t}\n\n\t/**\n\t * The template parameter can be used to indicate whether or not to read the UCW.\n\t * \n\t * This makes sense to pass this parameter as false if you've already read the UCW\n\t * of the reservoir by some other means\n\t */\n\ttemplate <bool readUCW = true>\n\tHIPRT_HOST_DEVICE ReGIRReservoir read_reservoir(int linear_reservoir_index) const\n\t{\n\t\tReGIRReservoir reservoir;\n\n\t\tif constexpr (readUCW)\n\t\t\treservoir.UCW = UCW[linear_reservoir_index];\n\n\t\treturn reservoir;\n\t}\n\n\tfloat* UCW = nullptr;\n\n\tunsigned int number_of_reservoirs_per_cell = 
0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/Settings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_SETTINGS_H\n#define DEVICE_INCLUDES_REGIR_SETTINGS_H\n\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/ReSTIR/ReGIR/PresampledLight.h\"\n#include \"Device/includes/ReSTIR/ReGIR/ReGIRHashGrid.h\"\n#include \"Device/includes/ReSTIR/ReGIR/HashGridSoADevice.h\"\n#include \"Device/includes/ReSTIR/ReGIR/ReservoirSoA.h\"\n\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\nstruct ReGIRPresampledLightsSoADevice\n{\n\tint* emissive_triangle_index = nullptr;\n\n\tfloat* light_area = nullptr;\n\n\tfloat3* point_on_light = nullptr;\n\n\tOctahedral24BitNormalPadded32b* light_normal = nullptr;\n\n\tFloat3xLengthUint10bPacked* emission = nullptr;\n};\n\nstruct ReGIRGridFillPresampledLights\n{\n\tHIPRT_DEVICE ReGIRPresampledLight sample_one_presampled_light(unsigned int hash_grid_cell_index, unsigned int reservoir_index_in_cell, unsigned int reservoir_count_per_grid_cell, float& out_pdf, Xorshift32Generator& rng) const\n\t{\n\t\t// Computing a subset index in [0, subset_count - 1]\n\t\tunsigned int subset_index_seed = (hash_grid_cell_index * reservoir_count_per_grid_cell + reservoir_index_in_cell) / stratification_size;\n\t\tunsigned int subset_index_random_seed = wang_hash(subset_index_seed) ^ rng.xorshift32();\n\t\tunsigned int random_subset = Xorshift32Generator(subset_index_random_seed).xorshift32() % subset_count;\n\n\t\tunsigned int index_in_subset = (hash_grid_cell_index * reservoir_count_per_grid_cell + reservoir_index_in_cell) % subset_size;\n\n\t\tReGIRPresampledLight sample;\n\t\tsample.emissive_triangle_index = presampled_lights_soa.emissive_triangle_index[random_subset * subset_size + index_in_subset];\n\t\tsample.triangle_area = 
presampled_lights_soa.light_area[random_subset * subset_size + index_in_subset];\n\t\tsample.point_on_light = presampled_lights_soa.point_on_light[random_subset * subset_size + index_in_subset];\n\t\tsample.normal = presampled_lights_soa.light_normal[random_subset * subset_size + index_in_subset];\n\t\t// sample.emission = presampled_lights_soa.emission[random_subset * subset_size + index_in_subset].unpack_color3x32f();\n\n\t\tout_pdf = 1.0f / subset_count;\n\n\t\treturn sample;\n\t}\n\n\tHIPRT_DEVICE void store_one_presampled_light(const ReGIRPresampledLight& presampled_light, unsigned int presampled_light_index)\n\t{\n\t\tpresampled_lights_soa.emissive_triangle_index[presampled_light_index] = presampled_light.emissive_triangle_index;\n\t\tpresampled_lights_soa.light_area[presampled_light_index] = presampled_light.triangle_area;\n\t\tpresampled_lights_soa.point_on_light[presampled_light_index] = presampled_light.point_on_light;\n\t\tpresampled_lights_soa.light_normal[presampled_light_index] = presampled_light.normal;\n\t\tpresampled_lights_soa.emission[presampled_light_index].pack(presampled_light.emission);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_presampled_light_count() const\n\t{\n\t\treturn subset_count * subset_size;\n\t}\n\n\tReGIRPresampledLightsSoADevice presampled_lights_soa;\n\n\t// How many consecutive reservoirs in the ReGIR grid\n\t// will sample from the same subset of presampled lights?\n\tint stratification_size = 64;\n\t// How many presampled lights per subset\n\tint subset_size = 256;\n\t// How many subsets in total\n\tint subset_count = 128;\n};\n\nstruct ReGIRGridFillSettings\n{\n\tHIPRT_DEVICE ReGIRGridFillSettings() : ReGIRGridFillSettings(true) {}\n\t\t\n\tHIPRT_DEVICE ReGIRGridFillSettings(bool primary_hit)\n\t{\n\t\tlight_sample_count_per_cell_reservoir = 32;\n\n\t\treservoirs_count_per_grid_cell_non_canonical = primary_hit ? 64 : 8;\n\t\treservoirs_count_per_grid_cell_canonical = primary_hit ? 
12 : 4;\n\t}\n\n\t// How many light samples are resampled into each reservoir of the grid cell\n\tint light_sample_count_per_cell_reservoir;\n\n\tHIPRT_DEVICE int get_non_canonical_reservoir_count_per_cell() const { return reservoirs_count_per_grid_cell_non_canonical; }\n\tHIPRT_DEVICE int get_canonical_reservoir_count_per_cell() const { return reservoirs_count_per_grid_cell_canonical; }\n\tHIPRT_DEVICE int get_total_reservoir_count_per_cell() const { return reservoirs_count_per_grid_cell_canonical + reservoirs_count_per_grid_cell_non_canonical; }\n\n\tHIPRT_DEVICE int* get_non_canonical_reservoir_count_per_cell_ptr() { return &reservoirs_count_per_grid_cell_non_canonical; }\n\tHIPRT_DEVICE int* get_canonical_reservoir_count_per_cell_ptr() { return &reservoirs_count_per_grid_cell_canonical; }\n\n\tHIPRT_DEVICE bool reservoir_index_in_cell_is_canonical(int reservoir_index_in_cell) const { return reservoir_index_in_cell >= get_non_canonical_reservoir_count_per_cell(); }\n\nprivate:\n\t// How many reservoirs are going to be produced per each cell of the grid.\n\t// \n\t// These reservoirs are \"non-canonical\" as they can include visibility/cosine terms\n\t// if visibility reuse is used\n\t// \n\t// Because these visibility/cosine terms are approximate, using these reservoirs alone\n\t// is going to be biased and so we need to combine them with \"canonical\" reservoirs during\n\t// shading for unbiasedness\n\t//\n\t// In the grid buffers, these reservoirs are stored first, i.e., for a grid cell with 3 non-canonical reservoirs\n\t// and 1 canonical reservoir:\n\t//\n\t// [non-canon, non-canon, non-canon, canonical]\n\tint reservoirs_count_per_grid_cell_non_canonical;\n\n\t// Number of canonical reservoirs per cell\n\t// \n\t// In the grid buffers, these reservoirs are stored last, i.e., for a grid cell with 3 non-canonical reservoirs\n\t// and 1 canonical reservoir:\n\t// \n\t// [non-canon, non-canon, non-canon, canonical]\n\tint 
reservoirs_count_per_grid_cell_canonical;\n};\n\nstruct ReGIRSpatialReuseSettings\n{\n\tbool do_spatial_reuse = true;\n \t// If true, the same random seed will be used by all grid cells during the spatial reuse for a given frame\n \t// This has the effect of coalescing neighbors memory accesses which improves performance\n\tbool do_coalesced_spatial_reuse = true;\n\n\tint spatial_reuse_pass_count = 2;\n\tint spatial_reuse_pass_index = 0;\n\n\tint spatial_neighbor_count = 3;\n\tint reuse_per_neighbor_count = 3;\n\t// When picking a random cell in the neighborhood for reuse, if that\n\t// cell is out of the grid or if that cell is not alive etc..., we're\n\t// going to retry another cell this many times\n\t//\n\t// This improves the chances that we're actually going to have a good\n\t// neighbor to reuse from --> more reuse --> less variance\n\tint retries_per_neighbor = 4;\n\tint spatial_reuse_radius = 1;\n};\n\nstruct ReGIRCorrelationReductionSettings\n{\n\tbool do_correlation_reduction = true;\n\n\tint correlation_reduction_factor = 2;\n\tint correl_frames_available = 0;\n\tunsigned int correl_reduction_current_grid = 0;\n\n\tReGIRHashGridSoADevice correlation_reduction_grid;\n};\n\nstruct ReGIRSettings\n{\n\tHIPRT_DEVICE bool compute_is_primary_hit(const RayPayload& ray_payload) const\n\t{\n\t\t// We're going to assume that this is still a primary hit grid cell if the path spread is low enough.\n\t\t// This is because a low number of reservoirs are usually used for secondary hit grid cells to lower the cost\n\t\t// of the grid fill while still maintaining good quality because a low number of reservoirs is usually enough\n\t\t// for diffuse bounces. 
Issues happen when we're looking through a mirror and that mirror is going\n\t\t// to reflect directly the scene lit by a low number of reservoirs and that will show as grid artifacts and correlations.\n\t\t//\n\t\t// So what we need to do here is to still use a high number of reservoirs when looking through mirrors (or\n\t\t// low path spread in general) to avoid those artifacts. We can do that very easily by just assuming that this grid\n\t\t// cell (hit by the mirror bounce) is a first hit grid cell and by assuming that it is a primary hit grid cell,\n\t\t// a higher number of reservoirs will be used for the grid fill and we'll avoid the artifacts.\n\n\t\treturn ray_payload.bounce == 0 || ray_payload.accumulated_roughness < 0.1f;\n\t}\n\n\tHIPRT_DEVICE ReGIRPresampledLight sample_one_presampled_light(unsigned int hash_grid_cell_index, unsigned int reservoir_index_in_cell, bool primary_hit, float& out_pdf, Xorshift32Generator& rng) const\n\t{\n\t\treturn presampled_lights.sample_one_presampled_light(hash_grid_cell_index, reservoir_index_in_cell, get_number_of_reservoirs_per_cell(primary_hit), out_pdf, rng);\n\t}\n\n\tHIPRT_DEVICE const ReGIRHashGridSoADevice& get_initial_reservoirs_grid(bool primary_hit) const { return primary_hit ? initial_reservoirs_primary_hits_grid : initial_reservoirs_secondary_hits_grid; }\n\tHIPRT_DEVICE ReGIRHashGridSoADevice& get_initial_reservoirs_grid(bool primary_hit) { return primary_hit ? initial_reservoirs_primary_hits_grid : initial_reservoirs_secondary_hits_grid; }\n\n\tHIPRT_DEVICE const ReGIRHashGridSoADevice& get_raw_spatial_output_reservoirs_grid(bool primary_hit) const { return primary_hit ? spatial_output_primary_hits_grid : spatial_output_secondary_hits_grid; }\n\tHIPRT_DEVICE ReGIRHashGridSoADevice& get_raw_spatial_output_reservoirs_grid(bool primary_hit) { return primary_hit ? 
spatial_output_primary_hits_grid : spatial_output_secondary_hits_grid; }\n\n\tHIPRT_DEVICE const ReGIRHashGridSoADevice& get_actual_spatial_output_reservoirs_grid(bool primary_hit) const { return primary_hit ? actual_spatial_output_buffers_primary_hits : actual_spatial_output_buffers_secondary_hits; }\n\n\tHIPRT_DEVICE ReGIRHashGridSoADevice& get_actual_spatial_output_reservoirs_grid(bool primary_hit) { return primary_hit ? actual_spatial_output_buffers_primary_hits : actual_spatial_output_buffers_secondary_hits; }\n\n\tHIPRT_DEVICE const ReGIRHashCellDataSoADevice& get_hash_cell_data_soa(bool primary_hit) const { return primary_hit ? hash_cell_data_primary_hits : hash_cell_data_secondary_hits; }\n\tHIPRT_DEVICE ReGIRHashCellDataSoADevice& get_hash_cell_data_soa(bool primary_hit) { return primary_hit ? hash_cell_data_primary_hits : hash_cell_data_secondary_hits; }\n\n\tHIPRT_DEVICE const ReGIRGridFillSettings& get_grid_fill_settings(bool primary_hit) const { return primary_hit ? grid_fill_settings_primary_hits : grid_fill_settings_secondary_hits; }\n\n\tHIPRT_DEVICE const AtomicType<float>* get_non_canonical_pre_integration_factor_buffer(bool primary_hit) const { return primary_hit ? non_canonical_pre_integration_factors_primary_hits : non_canonical_pre_integration_factors_secondary_hits; }\n\tHIPRT_DEVICE AtomicType<float>* get_non_canonical_pre_integration_factor_buffer(bool primary_hit) { return primary_hit ? non_canonical_pre_integration_factors_primary_hits : non_canonical_pre_integration_factors_secondary_hits; }\n\n\tHIPRT_DEVICE const AtomicType<float>* get_canonical_pre_integration_factor_buffer(bool primary_hit) const { return primary_hit ? canonical_pre_integration_factors_primary_hits : canonical_pre_integration_factors_secondary_hits; }\n\tHIPRT_DEVICE AtomicType<float>* get_canonical_pre_integration_factor_buffer(bool primary_hit) { return primary_hit ? 
canonical_pre_integration_factors_primary_hits : canonical_pre_integration_factors_secondary_hits; }\n\n\tHIPRT_DEVICE float get_non_canonical_pre_integration_factor(unsigned hash_grid_cell_index, bool primary_hit) const { return get_non_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index]; }\n\tHIPRT_DEVICE float get_canonical_pre_integration_factor(unsigned hash_grid_cell_index, bool primary_hit) const { return get_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index]; }\n\n\t///////////////////// Delegating to the grid for these functions /////////////////////\n\n\tHIPRT_DEVICE float3 get_cell_size(float3 world_position, const HIPRTCamera& current_camera, float roughness, bool primary_hit) const\n\t{\n\t\tfloat cell_size = ReGIRHashGrid::compute_adaptive_cell_size_roughness(world_position, current_camera, roughness, primary_hit, hash_grid.m_grid_cell_target_projected_size, hash_grid.m_grid_cell_min_size);\n\n\t\treturn make_float3(cell_size, cell_size, cell_size);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_hash_grid_cell_index_from_world_pos(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit) const\n\t{\n\t\treturn hash_grid.get_hash_grid_cell_index_from_world_pos(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, roughness, primary_hit);\n\t}\n\n\t///////////////////// Delegating to the grid for these functions /////////////////////\n\n\t/**\n\t * Returns the given reservoir index in the given grid cell index in the given grid of reservoirs\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_reservoir_from_grid_cell_index(ReGIRHashGridSoADevice reservoir_grid, unsigned int hash_grid_cell_index, unsigned int reservoir_index_in_cell)\n\t{\n\t\treturn hash_grid.read_full_reservoir(reservoir_grid, hash_grid.get_reservoir_index_in_grid(reservoir_grid, hash_grid_cell_index, 
reservoir_index_in_cell));\n\t}\n\n\t/**\n\t * Returns a reservoir from the grid cell that corresponds to the given world position, surface normal.\n\t * The returned reservoir is a non-canonical reservoir given by the non_canonical_reservoir_number.\n\t *\n\t * That number must be in the range [0, get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell()[.\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_cell_non_canonical_reservoir_from_index(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, unsigned int non_canonical_reservoir_number, bool* out_invalid_sample = nullptr) const\n\t{\n\t\treturn get_reservoir_for_shading_from_cell_indices(world_position, surface_normal, current_camera, roughness, primary_hit, non_canonical_reservoir_number, out_invalid_sample);\n\t}\n\n\t/**\n\t * Overload if you already have the hash grid cell index\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_cell_non_canonical_reservoir_from_index(unsigned int hash_grid_cell_index, bool primary_hit, unsigned int non_canonical_reservoir_number, bool* out_invalid_sample = nullptr) const\n\t{\n\t\treturn get_reservoir_for_shading_from_cell_indices(hash_grid_cell_index, primary_hit, non_canonical_reservoir_number, out_invalid_sample);\n\t}\n\n\t/**\n\t * Same as get_cell_non_canonical_reservoir_from_index() but for canonical reservoirs.\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_cell_canonical_reservoir_from_index(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, unsigned int canonical_reservoir_number, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tunsigned int non_canonical_reservoir_count = get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\n\t\treturn get_reservoir_for_shading_from_cell_indices(world_position, surface_normal, current_camera, roughness, primary_hit, non_canonical_reservoir_count + 
canonical_reservoir_number, out_invalid_sample);\n\t}\n\n\t/**\n\t * Overload if you already have the hash grid cell index\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_cell_canonical_reservoir_from_index(unsigned int hash_grid_cell_index, bool primary_hit, unsigned int canonical_reservoir_number, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tunsigned int non_canonical_reservoir_count = get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\n\t\treturn get_reservoir_for_shading_from_cell_indices(hash_grid_cell_index, primary_hit, non_canonical_reservoir_count + canonical_reservoir_number, out_invalid_sample);\n\t}\n\n\tHIPRT_DEVICE ReGIRReservoir get_random_cell_non_canonical_reservoir(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, Xorshift32Generator& rng, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tint random_non_canonical_reservoir_index_in_cell = rng.random_index(get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell());\n\n\t\treturn get_reservoir_for_shading_from_cell_indices(world_position, surface_normal, current_camera, roughness, primary_hit, random_non_canonical_reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\tHIPRT_DEVICE ReGIRReservoir get_random_cell_canonical_reservoir(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, Xorshift32Generator& rng, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tint random_canonical_reservoir_index_in_cell = rng.random_index(get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell());\n\n\t\tunsigned int non_canonical_reservoir_count = get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\t\treturn get_reservoir_for_shading_from_cell_indices(world_position, surface_normal, current_camera, roughness, primary_hit, non_canonical_reservoir_count + 
random_canonical_reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\t/**\n\t * Overload if you already have the hash grid cell index\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_reservoir_for_shading_from_cell_indices(unsigned int hash_grid_cell_index, bool primary_hit, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tif (spatial_reuse.do_spatial_reuse)\n\t\t\t// If spatial reuse is enabled, we're shading with the reservoirs from the output of the spatial reuse\n\t\t\treturn hash_grid.read_full_reservoir(get_actual_spatial_output_reservoirs_grid(primary_hit), hash_grid_cell_index, reservoir_index_in_cell, out_invalid_sample);\n\t\telse\n\t\t\t// No temporal reuse and no spatial reuse, reading from the output of the grid fill pass\n\t\t\treturn hash_grid.read_full_reservoir(get_initial_reservoirs_grid(primary_hit), hash_grid_cell_index, reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\t/**\n\t * If 'out_invalid_sample' is set to true, then the given shading point (+ the jittering) was outside of the grid\n\t * and no reservoir has been gathered\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_reservoir_for_shading_from_cell_indices(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\tunsigned int hash_grid_cell_index = hash_grid.get_hash_grid_cell_index(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, roughness, primary_hit);\n\n\t\treturn get_reservoir_for_shading_from_cell_indices(hash_grid_cell_index, primary_hit, reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_neighbor_replay_hash_grid_cell_index_for_shading(float3 shading_point, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, bool replay_canonical, bool do_jittering, float 
jittering_radius, Xorshift32Generator& rng) const\n\t{\n\t\tunsigned int neighbor_cell_index;\n\t\tif (replay_canonical)\n\t\t\tneighbor_cell_index = find_valid_jittered_neighbor_cell_index<true>(shading_point, surface_normal, current_camera, roughness, primary_hit, do_jittering, jittering_radius, rng);\n\t\telse\n\t\t\tneighbor_cell_index = find_valid_jittered_neighbor_cell_index<false>(shading_point, surface_normal, current_camera, roughness, primary_hit, do_jittering, jittering_radius, rng);\n\n\t\tif (neighbor_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t{\n\t\t\t// Advancing the RNG simulating the random reservoir pick within the grid cell\n\t\t\tif (replay_canonical)\n\t\t\t\trng.random_index(get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell());\n\t\t\telse\n\t\t\t\trng.random_index(get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell());\n\t\t}\n\n\t\treturn neighbor_cell_index;\n\t}\n\n\ttemplate <bool fallbackOnCenterCell>\n\tHIPRT_DEVICE unsigned int find_valid_jittered_neighbor_cell_index(float3 world_position, float3 shading_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, bool do_jittering, float jittering_radius, Xorshift32Generator& rng) const\n\t{\n\t\tunsigned int retry = 0;\n\t\tunsigned int neighbor_grid_cell_index;\n\t\t\n\t\tdo\n\t\t{\n\t\t\tfloat3 jittered;\n\t\t\tif (do_jittering)\n\t\t\t\tjittered = hash_grid.jitter_world_position(world_position, current_camera, roughness, primary_hit, rng, jittering_radius);\n\t\t\telse\n\t\t\t\tjittered = world_position;\n\n\t\t\tneighbor_grid_cell_index = hash_grid.get_hash_grid_cell_index(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), jittered, shading_normal, current_camera, roughness, primary_hit);\n\t\t\tif (neighbor_grid_cell_index != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\t{\n\t\t\t\t// This part here is to avoid race condition issues from the 
Megakernel shader:\n\t\t\t\t//\n\t\t\t\t// In the megakernel, rays that bounce around the scene may hit cells that have never been hit\n\t\t\t\t// before. This will cause these cells to become alive.\n\t\t\t\t//\n\t\t\t\t// When a cell is alive, it may be picked during the megakernel shading with ReGIR.\n\t\t\t\t// However, the cells are only filled during the grid fill pass/spatial reuse pass of ReGIR\n\t\t\t\t//\n\t\t\t\t// What can happen is that the Megakernel sets some grid cells alive and some other threads of the Megakernel then\n\t\t\t\t// try to use that grid cell for shading (since that grid cell is now alive). The issue though is that the grid fill pass\n\t\t\t\t// hasn't been launched yet (it will be launched at the next frame) and so the grid cell, even though it's alive, doesn't\n\t\t\t\t// contain valid data --> reading invalid reservoir data for shading\n\t\t\t\t//\n\t\t\t\t// So we're checking here if the cell contains valid data and if it doesn't, we're going to mark the cell\n\t\t\t\t// as being invalid with UNDEFINED_HASH_KEY\n\n\t\t\t\tfloat UCW;\n\t\t\t\tif (spatial_reuse.do_spatial_reuse)\n\t\t\t\t\tUCW = get_actual_spatial_output_reservoirs_grid(primary_hit).reservoirs.UCW[neighbor_grid_cell_index * get_number_of_reservoirs_per_cell(primary_hit)];\n\t\t\t\telse\n\t\t\t\t\tUCW = get_initial_reservoirs_grid(primary_hit).reservoirs.UCW[neighbor_grid_cell_index * get_number_of_reservoirs_per_cell(primary_hit)];\n\n\t\t\t\tif (UCW == ReGIRReservoir::UNDEFINED_UCW)\n\t\t\t\t\tneighbor_grid_cell_index = HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\t\t\t}\n\n\t\t\tretry++;\n\t\t} while (neighbor_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX && retry < ReGIR_ShadingJitterTries);\n\n\t\tif (fallbackOnCenterCell && neighbor_grid_cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX && retry == ReGIR_ShadingJitterTries)\n\t\t\t// We couldn't find a valid neighbor and the fallback on center cell is enabled: we're going to return the 
index of the center cell\n\t\t\tneighbor_grid_cell_index = hash_grid.get_hash_grid_cell_index(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, shading_normal, current_camera, roughness, primary_hit);\n\n\t\treturn neighbor_grid_cell_index;\n\t}\n\n\ttemplate <bool getCanonicalReservoir>\n\tHIPRT_DEVICE ReGIRReservoir get_random_reservoir_in_grid_cell_for_shading(unsigned int grid_cell_index, bool primary_hit, Xorshift32Generator& rng) const\n\t{\n\t\tunsigned int reservoir_index_in_cell;\n\t\t// If this stays to 0, this means that we're going to read the reservoirs from\n\t\t// either the regular initial candidates grid or spatial reuse grid\n\t\t// \n\t\t// If this is > 0, then we're going to read the reservoirs from the supersampling grid\n\t\tunsigned int grid_index = 0;\n\n\t\tif constexpr (getCanonicalReservoir)\n\t\t{\n\t\t\tif (supersampling.do_correlation_reduction)\n\t\t\t{\n\t\t\t\t// If correlation reduction is enabled, we want to pick a reservoir from the whole pool of (regular reservoirs + correlation reduction reservoirs)\n\t\t\t\treservoir_index_in_cell = rng.random_index(get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell() * (supersampling.correl_frames_available + 1));\n\t\t\t}\n\t\t\telse\n\t\t\t\treservoir_index_in_cell = rng.random_index(get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell());\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (supersampling.do_correlation_reduction)\n\t\t\t\t// If correlation reduction is enabled, we want to pick a reservoir from the whole pool of (regular reservoirs + correlation reduction reservoirs)\n\t\t\t\treservoir_index_in_cell = rng.random_index(get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell() * (supersampling.correl_frames_available + 1));\n\t\t\telse\n\t\t\t\treservoir_index_in_cell = 
rng.random_index(get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell());\n\t\t}\n\n\t\tif constexpr (getCanonicalReservoir)\n\t\t{\n\t\t\tgrid_index = reservoir_index_in_cell / get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell();\n\t\t\treservoir_index_in_cell %= get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell();\n\t\t}\n\t\telse\n\t\t{\n\t\t\tgrid_index = reservoir_index_in_cell / get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\t\t\treservoir_index_in_cell %= get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\t\t}\n\n\t\tunsigned int canonical_offset = getCanonicalReservoir ? get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell() : 0;\n\t\tunsigned int reservoir_index_in_grid = grid_cell_index * get_number_of_reservoirs_per_cell(primary_hit) + canonical_offset + reservoir_index_in_cell;\n\n\t\tif (grid_index == 0 || !primary_hit)\n\t\t{\n\t\t\t// Reading from the regular grids because the grid index is 0 or we're reading\n\t\t\t// secondary hits because we're not doing correlation reduction for secondary hits\n\n\t\t\tif (spatial_reuse.do_spatial_reuse)\n\t\t\t\t// If spatial reuse is enabled, we're shading with the reservoirs from the output of the spatial reuse\n\t\t\t\treturn hash_grid.read_full_reservoir(get_actual_spatial_output_reservoirs_grid(primary_hit), reservoir_index_in_grid);\n\t\t\telse\n\t\t\t\t// No temporal reuse and no spatial reuse, reading from the output of the grid fill pass\n\t\t\t\treturn hash_grid.read_full_reservoir(get_initial_reservoirs_grid(primary_hit), reservoir_index_in_grid);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// If we have grid_index == 1 here for example, this is going to be grid index 0 of the supersampling grid\n\t\t\t// so we have grid_index - 1\n\t\t\tunsigned int reservoir_index_in_supersample_grid = reservoir_index_in_grid + (grid_index - 1) * 
get_number_of_reservoirs_per_grid(primary_hit);\n\n\t\t\treturn hash_grid.read_full_reservoir(supersampling.correlation_reduction_grid, reservoir_index_in_supersample_grid);\n\t\t}\n\t}\n\n\t/**\n\t * Returns the reservoir indicated by linear_reservoir_index_in_grid but in the grid_index given\n\t * \n\t * This function only makes sense with temporal reuse where we have more than 1 grid and so a single reservoir index\n\t * isn't enough to fetch the reservoir in the reservoir buffer\n\t * \n\t * The 'grid_index' parameter allows reading from a specific grid of past frames. \n\t * This index should be in [0, temporal_reuse.temporal_history_length - 1].\n\t * \n\t * If not specified, this function reads from the grid of the current frame\n\t * \n\t * The 'opt' suffix of the function means that the UCW of the reservoir will be read first and the rest of the reservoir\n\t * will only be read if the UCW is > 0.0f.\n\t * If the UCW is <= 0.0f, the returned reservoir will have uninitialized values in all of its fields\n\t */\n\tHIPRT_DEVICE ReGIRReservoir get_temporal_reservoir_opt(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\treturn hash_grid.read_full_reservoir(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, roughness, primary_hit, reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\tHIPRT_DEVICE ReGIRReservoir get_grid_fill_output_reservoir_opt(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell, bool* out_invalid_sample = nullptr) const\n\t{\n\t\t// The output of the grid fill pass is in the current frame grid so we can call the temporal method with\n\t\t// index -1\n\t\treturn get_temporal_reservoir_opt(world_position, surface_normal, 
current_camera, roughness, primary_hit, reservoir_index_in_cell, out_invalid_sample);\n\t}\n\n\tHIPRT_DEVICE void store_reservoir_custom_buffer_opt(ReGIRHashGridSoADevice& output_reservoirs_grid, const ReGIRReservoir& reservoir, unsigned int hash_grid_cell_index, int reservoir_index_in_cell)\n\t{\n\t\thash_grid.store_reservoir_and_sample_opt(reservoir, output_reservoirs_grid, hash_grid_cell_index, reservoir_index_in_cell);\n\t}\n\n\tHIPRT_DEVICE void store_reservoir_custom_buffer_opt(ReGIRHashGridSoADevice& output_reservoirs_grid, ReGIRHashCellDataSoADevice& output_reservoirs_cell_data, const ReGIRReservoir& reservoir, \n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell)\n\t{\n\t\thash_grid.store_reservoir_and_sample_opt(reservoir, output_reservoirs_grid, output_reservoirs_cell_data, world_position, surface_normal, current_camera, roughness, primary_hit, reservoir_index_in_cell);\n\t}\n\n\t/**\n\t * Overload if you already have the hash grid cell index\n\t */\n\tHIPRT_DEVICE void store_initial_reservoir_opt(ReGIRReservoir reservoir, unsigned int hash_grid_cell_index, bool primary_hit, int reservoir_index_in_cell)\n\t{\n\t\thash_grid.store_reservoir_and_sample_opt(reservoir, get_initial_reservoirs_grid(primary_hit), hash_grid_cell_index, reservoir_index_in_cell);\n\t}\n\n\tHIPRT_DEVICE void store_initial_reservoir_opt(ReGIRReservoir reservoir, float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float roughness, bool primary_hit, int reservoir_index_in_cell)\n\t{\n\t\thash_grid.store_reservoir_and_sample_opt(reservoir, get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, roughness, primary_hit, reservoir_index_in_cell);\n\t}\n\n\tHIPRT_DEVICE ColorRGB32F get_random_cell_color(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, float 
roughness, bool primary_hit) const\n\t{\n\t\tunsigned int cell_index = hash_grid.get_hash_grid_cell_index_from_world_pos(get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, roughness, primary_hit);\n\t\tif (cell_index == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t\treturn ColorRGB32F(0.0f);\n\n\t\treturn ColorRGB32F::random_color(cell_index);\n\t}\n\n\tHIPRT_DEVICE unsigned int get_total_number_of_cells_per_grid(bool primary_hit_cells) const\n\t{\n\t\treturn get_initial_reservoirs_grid(primary_hit_cells).m_total_number_of_cells;\n\t}\n\n\tHIPRT_DEVICE unsigned int get_number_of_reservoirs_per_grid(bool primary_hit_cells) const\n\t{\n\t\t// We need to keep this dynamic on the CPU so not using the precomputed variable\n\t\treturn get_total_number_of_cells_per_grid(primary_hit_cells) * get_grid_fill_settings(primary_hit_cells).get_total_reservoir_count_per_cell();\n\t}\n\n\tHIPRT_DEVICE unsigned int get_number_of_reservoirs_per_cell(bool primary_hit_cells) const\n\t{\n\t\t// We need to keep this dynamic on the CPU so not using the precomputed variable\n\t\treturn get_grid_fill_settings(primary_hit_cells).get_total_reservoir_count_per_cell();\n\t}\n\n\tHIPRT_DEVICE static void insert_hash_cell_data(ReGIRHashCellDataSoADevice& hash_cell_data_to_update,\n\t\tunsigned int hash_grid_cell_index, float3 world_position, float3 shading_normal, int primitive_index, const DeviceUnpackedEffectiveMaterial& material)\n\t{\n\t\tif (hippt::atomic_compare_exchange(&hash_cell_data_to_update.hit_primitive[hash_grid_cell_index], ReGIRHashCellDataSoADevice::UNDEFINED_PRIMITIVE, primitive_index) == ReGIRHashCellDataSoADevice::UNDEFINED_PRIMITIVE)\n\t\t{\n\t\t\thash_cell_data_to_update.world_points[hash_grid_cell_index] = world_position;\n\t\t\thash_cell_data_to_update.world_normals[hash_grid_cell_index].pack(shading_normal);\n\t\t\thash_cell_data_to_update.roughness[hash_grid_cell_index] = material.roughness * 
255.0f;\n\t\t\thash_cell_data_to_update.metallic[hash_grid_cell_index] = material.metallic * 255.0f;\n\t\t\thash_cell_data_to_update.specular[hash_grid_cell_index] = material.specular * 255.0f;\n\t\t}\n\n\t\t// Because we just inserted into that grid cell, it is now alive\n\t\t// Only go through all that atomic stuff if the cell isn't alive\n\t\tif (hash_cell_data_to_update.grid_cell_alive[hash_grid_cell_index] == 0)\n\t\t{\n\t\t\t// TODO is this atomic needed since we can only be here if the cell was unoccupied?\n\n\t\t\tif (hippt::atomic_compare_exchange(&hash_cell_data_to_update.grid_cell_alive[hash_grid_cell_index], 0u, 1u) == 0u)\n\t\t\t{\n\t\t\t\tunsigned int cell_alive_index = hippt::atomic_fetch_add(hash_cell_data_to_update.grid_cells_alive_count, 1u);\n\n\t\t\t\thash_cell_data_to_update.grid_cells_alive_list[cell_alive_index] = hash_grid_cell_index;\n\t\t\t}\n\t\t}\n\t}\n\n\tHIPRT_DEVICE static void insert_hash_cell_data_static(\n\t\tconst ReGIRHashGrid& hash_grid, ReGIRHashGridSoADevice& hash_grid_to_update, ReGIRHashCellDataSoADevice& hash_cell_data_to_update,\n\t\tfloat3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, int primitive_index, bool primary_hit, const DeviceUnpackedEffectiveMaterial& material)\n\t{\n\t\tunsigned int checksum;\n\t\tunsigned int hash_grid_cell_index = hash_grid.custom_regir_hash(world_position, surface_normal, current_camera, material.roughness, primary_hit, hash_grid_to_update.m_total_number_of_cells, checksum);\n\t\t// TODO we can have a if (current_hash_key != undefined_key) here to skip some atomic operations\n\t\t\n\t\t// Trying to insert the new key atomically \n\t\tunsigned int existing_checksum = hippt::atomic_compare_exchange(&hash_cell_data_to_update.checksums[hash_grid_cell_index], HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX, checksum);\n\t\tif (existing_checksum != HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n\t\t{\n\t\t\t// We tried inserting in our cell but there is something else there 
already\n\t\t\t\n\t\t\tif (existing_checksum != checksum)\n\t\t\t{\n\t\t\t\t// And it's not our hash so this is a collision\n\n\t\t\t\tunsigned int new_hash_cell_index = hash_grid_cell_index;\n\t\t\t\tif (!HashGrid::resolve_collision<ReGIR_HashGridCollisionResolutionMaxSteps, true>(hash_cell_data_to_update.checksums, hash_grid_to_update.m_total_number_of_cells, new_hash_cell_index, checksum, existing_checksum))\n\t\t\t\t{\n\t\t\t\t\t// Could not resolve the collision\n\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\telse \n\t\t\t\t{\n\t\t\t\t\t// We resolved the collision by finding an empty cell\n\t\t\t\t\thash_grid_cell_index = new_hash_cell_index;\n\n\t\t\t\t\tinsert_hash_cell_data(hash_cell_data_to_update, hash_grid_cell_index, world_position, surface_normal, primitive_index, material);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// We just succeeded the insertion of our key in an empty cell\n\t\t\t\n\t\t\tinsert_hash_cell_data(hash_cell_data_to_update, hash_grid_cell_index, world_position, surface_normal, primitive_index, material);\n\t\t}\n\n\t}\n\n\tHIPRT_DEVICE void insert_hash_cell_data(float3 world_position, float3 surface_normal, const HIPRTCamera& current_camera, bool primary_hit, int primitive_index, const DeviceUnpackedEffectiveMaterial& material)\n\t{\n\t\tReGIRSettings::insert_hash_cell_data_static(hash_grid, get_initial_reservoirs_grid(primary_hit), get_hash_cell_data_soa(primary_hit), world_position, surface_normal, current_camera, primitive_index, primary_hit, material);\n\t}\n\n\t// If true, the ReGIR grid fill and spatial reuse will run in parallel of the\n\t// path tracing kernels. 
This helps with performance a bit and helps amortize\n\t// the grid fill/spatial reuse cost of ReGIR.\n\t//\n\t// Async compute is only supported with spatial reuse enabled though.\n\tbool do_asynchronous_compute = true;\n\tbool do_light_presampling = ReGIR_GridFillDoLightPresampling;\n\n\tbool DEBUG_CORRELATE_rEGIR = true;\n\tbool DEBUG_DO_RIS_INTEGRAL_NORMALIZATION = true;\n\n\t// How many frames to skip before running the grid fill and spatial reuse passes again\n\t// \n\t// A value of 1 for example means that the grid fill and spatial reuse will be ran at frame 0\n\t// but not at frame 1. And ran at frame 2 but not at frame 3. ...\n\t//\n\t// This amortizes the overhead of ReGIR grid fill / spatial reuse by using the fact that each cell\n\t// contains many reservoirs so the same cell can be used multiple times before all reservoirs have been used\n\t// and new samples are necessary\n\tint frame_skip_primary_hit_grid = 0;\n\tint frame_skip_secondary_hit_grid = 2;\n\n\tReGIRHashGrid hash_grid;\n\n\t// Grid that contains the output reservoirs of the grid fill pass for the primary hits grid cells\n\tReGIRHashGridSoADevice initial_reservoirs_primary_hits_grid;\n\tReGIRHashGridSoADevice initial_reservoirs_secondary_hits_grid;\n\t// Grid that contains the output reservoirs of the spatial reuse pass for the primary hits grid cells\n\tReGIRHashGridSoADevice spatial_output_primary_hits_grid;\n\tReGIRHashGridSoADevice spatial_output_secondary_hits_grid;\n\t// If we have multiple spatial reuse passes or async compute, the output of the spatial reuse passes\n\t// may not simply be in 'spatial_output_primary_hits_grid' (for primary hits) because of buffer ping-ponging\n\t// so instead the actual buffers are in there\n\tReGIRHashGridSoADevice actual_spatial_output_buffers_primary_hits;\n\tReGIRHashGridSoADevice actual_spatial_output_buffers_secondary_hits;\n\n\t// Contains data associated with the primary hits grid cells\n\tReGIRHashCellDataSoADevice 
hash_cell_data_primary_hits;\n\tReGIRHashCellDataSoADevice hash_cell_data_secondary_hits;\n\n\tReGIRGridFillPresampledLights presampled_lights;\n\tReGIRGridFillSettings grid_fill_settings_primary_hits = ReGIRGridFillSettings(true);\n\tReGIRGridFillSettings grid_fill_settings_secondary_hits = ReGIRGridFillSettings(false);\n\n\tReGIRSpatialReuseSettings spatial_reuse;\n\tReGIRShadingSettings shading;\n\tReGIRCorrelationReductionSettings supersampling;\n\n\tAtomicType<float>* non_canonical_pre_integration_factors_primary_hits = nullptr;\n\tAtomicType<float>* canonical_pre_integration_factors_primary_hits = nullptr;\n\n\tAtomicType<float>* non_canonical_pre_integration_factors_secondary_hits = nullptr;\n\tAtomicType<float>* canonical_pre_integration_factors_secondary_hits = nullptr;\n\n\t// Multiplicative factor to multiply the output of some debug views\n\tfloat debug_view_scale_factor = 0.05f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/ShadingAdditionalInfo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_SHADING_ADDITIONAL_INFO_H\n#define DEVICE_INCLUDES_REGIR_SHADING_ADDITIONAL_INFO_H\n\nstruct ReGIRShadingAdditionalInfo\n{\n\tColorRGB32F sample_radiance = ColorRGB32F(0.0f);\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/ShadingSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_SHADING_SETTINGS_H\n#define DEVICE_INCLUDES_REGIR_SHADING_SETTINGS_H\n\nstruct ReGIRShadingSettings\n{\n\tHIPRT_DEVICE bool get_do_cell_jittering(bool primary_hit) const\n\t{\n\t\treturn primary_hit ? do_cell_jittering_first_hits : do_cell_jittering_secondary_hits;\n\t}\n\n\tint number_of_neighbors = 4;\n\t// At path tracing time, how many reservoirs of the grid cell of the point we're trying to shade\n\t// are going to be resampled (with the BRDF term) to produce the final light sample used for NEE\n\tint reservoir_tap_count_per_neighbor = 1;\n\t// Whether or not to jitter the world space position used when looking up the ReGIR grid\n\t// This helps eliminate grid discretization  artifacts\n\t//\n\t// First hits are for the camera ray hits (i.e. the grid cells visible by the camera)\n\t// Secondary hits are grid cells only found by bouncing around in the scene\n\tbool do_cell_jittering_first_hits = true;\n\tbool do_cell_jittering_secondary_hits = false;\n\t// Radius of jittering when picking reservoirs from neighboring grid cells for shading\n\tfloat jittering_radius = 0.75f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/TargetFunction.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_REGIR_TARGET_FUNCTION_H\n#define DEVICE_INCLUDES_REGIR_TARGET_FUNCTION_H\n\n#include \"Device/includes/BSDFs/BSDFContext.h\"\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/PDFConversion.h\"\n#include \"Device/includes/ReSTIR/ReGIR/GridFillSurface.h\"\n#include \"Device/includes/ReSTIR/ReGIR/VisibilityTest.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\ntemplate <bool includeVisibility, bool withCosineTerm, bool withCosineTermLightSource, bool includeBSDFPrimaryHit, bool includeBSDFSecondaryHit, bool withNeePlusPlusVisibilityEstimation>\nHIPRT_DEVICE float ReGIR_grid_fill_evaluate_target_function(const HIPRTRenderData& render_data, \n\tReGIRGridFillSurface surface, bool primary_hit,\n\tColorRGB32F sample_emission, float3 sample_normal, float3 sample_position, Xorshift32Generator& rng)\n{\n\tfloat3 to_light_direction = sample_position - surface.cell_point;\n\tfloat distance_to_light = hippt::length(to_light_direction);\n\tto_light_direction /= distance_to_light;\n\n\tfloat target_function = sample_emission.luminance() / hippt::square(distance_to_light);\n\tif (surface.cell_primitive_index != -1 && withCosineTerm)\n\t\t// We do have a representative normal, taking the cosine term into account\n\t\ttarget_function *= hippt::max(0.0f, hippt::dot(surface.cell_normal, to_light_direction));\n\n\tif constexpr (withCosineTermLightSource)\n\t\ttarget_function *= compute_cosine_term_at_light_source(sample_normal, -to_light_direction);\n\n\tif (target_function <= 0.0f)\n\t\treturn 0.0f;\n\n\tif ((primary_hit && includeBSDFPrimaryHit) || (!primary_hit && includeBSDFSecondaryHit))\n\t{\n\t\tfloat out_pdf;\n\t\tRayVolumeState empty_volume_state;\n\t\tBSDFIncidentLightInfo 
out_incident_light_info;\n\n\t\tDeviceUnpackedEffectiveMaterial approximate_material;\n\t\tapproximate_material.roughness = surface.cell_roughness;\n\t\tapproximate_material.metallic = surface.cell_metallic;\n\t\tapproximate_material.specular = surface.cell_specular;\n\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE && DirectLightSamplingBaseStrategy == LSS_BASE_REGIR\n\t\tBSDFContext bsdf_context = BSDFContext(hippt::normalize(render_data.current_camera.position - surface.cell_point), surface.cell_normal, surface.cell_normal, to_light_direction, out_incident_light_info, empty_volume_state, false, approximate_material, 0, 0, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n#else\n\t\tBSDFContext bsdf_context = BSDFContext(hippt::normalize(render_data.current_camera.position - surface.cell_point), surface.cell_normal, surface.cell_normal, to_light_direction, out_incident_light_info, empty_volume_state, false, approximate_material, 0, 0, MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n#endif\n\t\tColorRGB32F bsdf_radiance = bsdf_dispatcher_eval(render_data, bsdf_context, out_pdf, rng);\n\t\ttarget_function *= bsdf_radiance.luminance();\n\t}\n\n\tif constexpr (includeVisibility)\n\t{\n\t\tif (target_function > 0.0f)\n\t\t\t// No need to visibility test if the target function is already 0\n\t\t\ttarget_function *= ReGIR_grid_cell_visibility_test(render_data, surface.cell_point, surface.cell_primitive_index, sample_position, rng);\n\t}\n\telse if constexpr (withNeePlusPlusVisibilityEstimation && DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE)\n\t{\n\t\tNEEPlusPlusContext context;\n\t\tcontext.envmap = false;\n\t\tcontext.point_on_light = sample_position;\n\t\tcontext.shaded_point = surface.cell_point;\n\n\t\ttarget_function *= render_data.nee_plus_plus.estimate_visibility_probability(context, render_data.current_camera);\n\t}\n\n\treturn target_function;\n}\n\nHIPRT_DEVICE float 
ReGIR_grid_fill_evaluate_non_canonical_target_function(const HIPRTRenderData& render_data, \n\tunsigned int hash_grid_cell_index, bool primary_hit,\n\tColorRGB32F sample_emission, float3 sample_normal, float3 sample_position, Xorshift32Generator& rng)\n{\n\tReGIRGridFillSurface surface = ReGIR_get_cell_surface(render_data, hash_grid_cell_index, primary_hit);\n\n\treturn ReGIR_grid_fill_evaluate_target_function<\n\t\tReGIR_GridFillTargetFunctionVisibility, ReGIR_GridFillTargetFunctionCosineTerm, ReGIR_GridFillTargetFunctionCosineTermLightSource, \n\t\tReGIR_GridFillPrimaryHitsTargetFunctionBSDF, ReGIR_GridFillSecondaryHitsTargetFunctionBSDF, ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation>(\n\t\t\trender_data, surface, primary_hit, sample_emission, sample_normal, sample_position, rng);\n}\n\nHIPRT_DEVICE float ReGIR_grid_fill_evaluate_non_canonical_target_function(const HIPRTRenderData& render_data, \n\tconst ReGIRGridFillSurface& surface, bool primary_hit,\n\tColorRGB32F sample_emission, float3 sample_normal, float3 sample_position, Xorshift32Generator& rng)\n{\n\treturn ReGIR_grid_fill_evaluate_target_function<\n\t\tReGIR_GridFillTargetFunctionVisibility, ReGIR_GridFillTargetFunctionCosineTerm, ReGIR_GridFillTargetFunctionCosineTermLightSource, \n\t\tReGIR_GridFillPrimaryHitsTargetFunctionBSDF, ReGIR_GridFillSecondaryHitsTargetFunctionBSDF, ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation>(\n\t\t\trender_data, surface, primary_hit, sample_emission, sample_normal, sample_position, rng);\n}\n\nHIPRT_DEVICE float ReGIR_grid_fill_evaluate_canonical_target_function(const HIPRTRenderData& render_data, \n\tunsigned int hash_grid_cell_index, bool primary_hit,\n\tColorRGB32F sample_emission, float3 sample_normal, float3 sample_position, Xorshift32Generator& rng)\n{\n\tReGIRGridFillSurface surface = ReGIR_get_cell_surface(render_data, hash_grid_cell_index, primary_hit);\n\n\treturn ReGIR_grid_fill_evaluate_target_function<false, false, false, false, 
false, false>(\n\t\trender_data, surface, primary_hit, sample_emission, sample_normal, sample_position, rng);\n}\n\nHIPRT_DEVICE float ReGIR_grid_fill_evaluate_canonical_target_function(const HIPRTRenderData& render_data, \n\tconst ReGIRGridFillSurface& surface, bool primary_hit,\n\tColorRGB32F sample_emission, float3 sample_normal, float3 sample_position, Xorshift32Generator& rng)\n{\n\treturn ReGIR_grid_fill_evaluate_target_function<false, false, false, false, false, false>(\n\t\trender_data, surface, primary_hit, sample_emission, sample_normal, sample_position, rng);\n}\n\ntemplate <bool withVisibility, bool withNeePlusPlusVisibilityEstimation>\nHIPRT_DEVICE float ReGIR_shading_evaluate_target_function(const HIPRTRenderData& render_data,\n\tconst float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal,\n\tint last_hit_primitive_index, RayPayload& ray_payload,\n\tconst float3& point_on_light, const float3& light_source_normal,\n\tconst ColorRGB32F& light_emission,\n\tXorshift32Generator& rng,\n\tBSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO)\n{\n\tColorRGB32F trash_radiance;\n\treturn ReGIR_shading_evaluate_target_function<withVisibility, withNeePlusPlusVisibilityEstimation>(render_data, \n\t\tshading_point, view_direction, shading_normal, geometric_normal,\n\t\tlast_hit_primitive_index, ray_payload, \n\t\tpoint_on_light, light_source_normal, \n\t\tlight_emission, \n\t\trng, trash_radiance, \n\t\tincident_light_info);\n}\n\ntemplate <bool withVisibility, bool withNeePlusPlusVisibilityEstimation>\nHIPRT_DEVICE float ReGIR_shading_evaluate_target_function(const HIPRTRenderData& render_data,\n\tconst float3& shading_point, const float3& view_direction, const float3& shading_normal, const float3& geometric_normal,\n\tint last_hit_primitive_index, RayPayload& ray_payload,\n\tconst float3& point_on_light, const float3& light_source_normal,\n\tconst ColorRGB32F& 
light_emission,\n\tXorshift32Generator& rng, ColorRGB32F& sample_radiance,\n\tBSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO)\n{\n\tfloat3 to_light_direction = point_on_light - shading_point;\n\tfloat distance_to_light = hippt::length(to_light_direction);\n\tto_light_direction /= distance_to_light; // Normalization\n\n\tfloat bsdf_pdf;\n#if ReGIR_ShadingResamplingDoBSDFMIS == KERNEL_OPTION_TRUE && DirectLightSamplingBaseStrategy == LSS_BASE_REGIR\n\tBSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, to_light_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_MIS);\n#else\n\tBSDFContext bsdf_context(view_direction, shading_normal, geometric_normal, to_light_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness, MicrofacetRegularization::RegularizationMode::REGULARIZATION_CLASSIC);\n#endif\n\tColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf, rng);\n\n\tfloat cosine_term = hippt::max(0.0f, hippt::dot(shading_normal, to_light_direction));\n\tfloat geometry_term = compute_cosine_term_at_light_source(light_source_normal, -to_light_direction) / hippt::square(distance_to_light);\n\n\tsample_radiance = bsdf_color * light_emission * cosine_term * geometry_term;\n\n\tfloat target_function = sample_radiance.luminance();\n\tif (target_function <= 0.0f)\n\t\treturn 0.0f;\n\n\tif constexpr (withVisibility)\n\t{\n\t\tif (target_function > 0.0f)\n\t\t{\n\t\t\thiprtRay shadow_ray;\n\t\t\tshadow_ray.origin = shading_point;\n\t\t\tshadow_ray.direction = to_light_direction;\n\n\t\t\tbool in_shadow = evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, last_hit_primitive_index, ray_payload.bounce, rng);\n\t\t\n\t\t\ttarget_function *= 
!in_shadow;\n\t\t\tsample_radiance *= !in_shadow;\n\t\t}\n\t}\n\telse if constexpr (withNeePlusPlusVisibilityEstimation && DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE)\n\t{\n\t\tNEEPlusPlusContext context;\n\t\tcontext.envmap = false;\n\t\tcontext.point_on_light = point_on_light;\n\t\tcontext.shaded_point = shading_point;\n\n\t\tfloat visibility_proba = render_data.nee_plus_plus.estimate_visibility_probability(context, render_data.current_camera);\n\t\tif (visibility_proba > 0.005f)\n\t\t\tvisibility_proba = 1.0f;\n\t\tvisibility_proba = hippt::max(0.1f, visibility_proba);\n\t\t\t\n\t\ttarget_function *= visibility_proba;\n\t}\n\t\n\treturn target_function;\n}\n\n#endif"
  },
  {
    "path": "src/Device/includes/ReSTIR/ReGIR/VisibilityTest.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_VISIBILITY_TEST_H\n#define DEVICE_KERNELS_REGIR_VISIBILITY_TEST_H\n \n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/ReSTIR/ReGIR/Representative.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n/**\n * Returns false if the given 'point_on_light' is occluded from the point of view of the\n * representative point of the grid cell given by 'hash_grid_cell_index'\n *\n * Returns true if unoccluded\n */\nHIPRT_DEVICE bool ReGIR_grid_cell_visibility_test(const HIPRTRenderData& render_data, float3 representative_point, int representative_primitive_index, float3 point_on_light, Xorshift32Generator& rng)\n{\n    float3 to_light_direction = point_on_light - representative_point;\n    float distance_to_light = hippt::length(to_light_direction);\n    to_light_direction /= distance_to_light;\n\n    hiprtRay shadow_ray;\n    shadow_ray.origin = representative_point;\n    shadow_ray.direction = to_light_direction;\n\n    return !evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, representative_primitive_index, 0, rng);\n}\n\nHIPRT_DEVICE bool ReGIR_grid_cell_visibility_test(const HIPRTRenderData& render_data, int hash_grid_cell_index, bool primary_hit, float3 point_on_light, Xorshift32Generator& rng)\n{\n    int representative_primitive_index = ReGIR_get_cell_primitive_index(render_data, hash_grid_cell_index, primary_hit);\n    float3 representative_point = ReGIR_get_cell_world_point(render_data, hash_grid_cell_index, primary_hit);\n\n    return ReGIR_grid_cell_visibility_test(render_data, representative_point, representative_primitive_index, point_on_light, rng);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/SpatialMISWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIAL_MIS_WEIGHT_H\n#define DEVICE_RESTIR_DI_SPATIAL_MIS_WEIGHT_H \n\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight {};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(int reservoir_being_resampled_M)\n\t{\n\t\treturn reservoir_being_resampled_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(int reservoir_being_resampled_M)\n\t{\n\t\treturn reservoir_being_resampled_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data, int reservoir_being_resampled_M)\n\t{\n\t\treturn ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights ? 
reservoir_being_resampled_M : 1;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tfloat reservoir_being_resampled_UCW,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& reservoir_being_resampled_sample,\n\n\t\tconst ReSTIRSurface& center_pixel_surface,\n\t\tint current_neighbor_index,\n\t\tint2 center_pixel_coords,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (reservoir_being_resampled_UCW <= 0.0f)\n\t\t\t// Reservoir that doesn't contain any sample, returning \n\t\t\t// 1.0f MIS weight so that multiplying by that doesn't do anything\n\t\t\treturn 1.0f;\n\n\t\tfloat nume = 0.0f;\n\t\tfloat denom = 0.0f;\n\n\t\tunsigned int backup_seed = random_number_generator.m_state.seed;\n\n\t\trandom_number_generator.m_state.seed = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data).spatial_neighbors_rng_seed;\n\n\t\tfor (int j = 0; j < ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data).reuse_neighbor_count + 1; j++)\n\t\t{\n\t\t\tint neighbor_index_j = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, j, center_pixel_coords, random_number_generator);\n\t\t\tif (neighbor_index_j == -1)\n\t\t\t\t// Invalid neighbor, skipping\n\t\t\t\tcontinue;\n\n\t\t\tint center_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data, neighbor_index_j, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface)))\n\t\t\t\t// Neighbor too dissimilar according to heuristics, skipping\n\t\t\t\tcontinue;\n\n\t\t\tReSTIRSurface neighbor_surface = get_pixel_surface(render_data, neighbor_index_j, 
random_number_generator);\n\n\t\t\tfloat target_function_at_j;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\tif (j == current_neighbor_index)\n\t\t\t\t\ttarget_function_at_j = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility, /* resampling neighbor */ false>(render_data, reservoir_being_resampled_sample, neighbor_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\ttarget_function_at_j = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility, /* resampling neighbor */ true>(render_data, reservoir_being_resampled_sample, neighbor_surface, random_number_generator);\n\n\t\t\t\tif (!reservoir_being_resampled_sample.is_envmap_path())\n\t\t\t\t\t// Applying the jacobian to get \"p_hat_from_i\"\n\t\t\t\t\ttarget_function_at_j *= hippt::max(0.0f, get_jacobian_determinant_reconnection_shift(reservoir_being_resampled_sample.sample_point, reservoir_being_resampled_sample.sample_point_geometric_normal.unpack(), center_pixel_surface.shading_point, neighbor_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold()));\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_j = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, neighbor_surface, random_number_generator);\n\n\t\t\tint M = 1;\n\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\tM = ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_index_j);\n\t\t\tdenom += target_function_at_j * M;\n\t\t\tif (j == current_neighbor_index)\n\t\t\t\tnume = target_function_at_j * M;\n\t\t}\n\n\t\tif (denom == 0.0f)\n\t\t\treturn 0.0f;\n\t\telse\n\t\t\treturn nume / denom;\n\n\t\trandom_number_generator.m_state.seed = backup_seed;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct 
ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint reservoir_being_resampled_M, float reservoir_being_resampled_target_function, \n\t\tReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample, int center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\t\tReSTIRReservoirType<IsReSTIRGI>& neighbor_pixel_reservoir,\n\n\t\tReSTIRSurface& center_pixel_surface, float target_function_at_center,\n\t\tint neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resampling_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resampling_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\t// The target function of the neighbor reservoir's sample at the neighbor surface is just\n\t\t\t// the target function stored in the neighbor's reservoir.\n\t\t\t//\n\t\t\t// Care must be taken however because this is not necessarily true anymore after multiple spatial\n\t\t\t// reuse passes: a given pixel may now hold a sample from another pixel and that means that the visibility\n\t\t\t// doesn't match anymore.\n\t\t\t//\n\t\t\t// However, this ReSTIR implementation does a visibility reuse pass at the end of each spatial reuse pass\n\t\t\t// so that we know that the visibility is correct and thus we do not run into any issues and we can just\n\t\t\t// reuse the target function stored in the neighbor's reservoir\n\t\t\tfloat target_function_at_neighbor = reservoir_being_resampled_target_function;\n\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? 
reservoir_being_resampled_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : 1;\n\t\t\t// We only want to divide by M-1 if we're not using confidence weights.\n\t\t\t// (Eq. 7.6 and 7.7 of \"A Gentle Introduction to ReSTIR\")\n\t\t\tfloat valid_neighbor_division_term = use_confidence_weights ? 1 : valid_neighbors_count;\n\n\t\t\tfloat nume = target_function_at_neighbor * reservoir_resampled_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\tfloat mi = denom == 0.0f ? 0.0f : (nume / denom);\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\t\t\t\t\t\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal.unpack(), neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc = target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat denom_mc = target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\n\t\t\t\tfloat confidence_weights_multiplier;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t{\n\t\t\t\t\tif (neighbors_confidence_sum == 0.0f)\n\t\t\t\t\t\tconfidence_weights_multiplier = 
0.0f;\n\t\t\t\t\telse\n\t\t\t\t\t\tconfidence_weights_multiplier = reservoir_resampled_M / neighbors_confidence_sum;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = 1.0f;\n\n\t\t\t\t// (Eq. 7.7 of \"A Gentle Introduction to ReSTIR\"), c_j / (Sum_{k!=c}^M c_k)\n\t\t\t\tif (denom_mc != 0.0f)\n\t\t\t\t\tmc += nume_mc / denom_mc / valid_neighbor_division_term * confidence_weights_multiplier;\n\t\t\t}\n\n\t\t\treturn mi / valid_neighbor_division_term;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint reservoir_being_resampled_M, float reservoir_being_resampled_target_function,\n\t\tReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample, int center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\t\tReSTIRReservoirType<IsReSTIRGI>& neighbor_pixel_reservoir,\n\n\t\tReSTIRSurface& center_pixel_surface, float target_function_at_center,\n\t\tint neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resampling_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resampling_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\t// 
The target function of the neighbor reservoir's sample at the neighbor surface is just\n\t\t\t// the target function stored in the neighbor's reservoir.\n\t\t\t//\n\t\t\t// Care must be taken however because this is not necessarily true anymore after multiple spatial\n\t\t\t// reuse passes: a given pixel may now hold a sample from another pixel and that means that the visibility\n\t\t\t// doesn't match anymore.\n\t\t\t//\n\t\t\t// However, this ReSTIR DI implementation does a visibility reuse pass at the end of each spatial reuse pass\n\t\t\t// so that we know that the visibility is correct and thus we do not run into any issues and we can just\n\t\t\t// reuse the target function stored in the neighbor's reservoir\n\t\t\tfloat target_function_at_neighbor = reservoir_being_resampled_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? reservoir_being_resampled_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : 1;\n\t\t\t// We only want to divide by M-1 if we're not using confidence weights.\n\t\t\t// (Eq. 7.6 and 7.7 of \"A Gentle Introduction to ReSTIR\")\n\t\t\tfloat valid_neighbor_division_term = use_confidence_weights ? 1 : valid_neighbors_count;\n\n\t\t\tfloat nume = target_function_at_neighbor * reservoir_resampled_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\tfloat mi = denom == 0.0f ? 
0.0f : (nume / denom);\n\t\t\tif (use_confidence_weights)\n\t\t\t\tmi *= neighbors_confidence_sum / (neighbors_confidence_sum + center_reservoir_M);\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\t// There's one case where we do not need to update 'mc': when the center pixel (that we're currently resampling) is empty: M = 0 / UCW = 0\n\t\t\t\t// That's because in such cases, the empty reservoir will not be resampled into the final reservoir anyways since it has no contribution\n\t\t\t\t// Because 'mc' is only used as the MIS weight of the center reservoir, we don't care about 'mc' since the center reservoir is not going\n\t\t\t\t// to be chosen anyways\n\t\t\t\t//\n\t\t\t\t// So we can avoid computing all that stuff\n\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal.unpack(), neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\t\tfloat nume_mc = target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat denom_mc = target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat confidence_multiplier = 1.0f;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t\tconfidence_multiplier = 
reservoir_resampled_M / (center_reservoir_M + neighbors_confidence_sum);\n\t\t\t\tif (denom_mc != 0.0f)\n\t\t\t\t\tmc += nume_mc / denom_mc * confidence_multiplier;\n\t\t\t}\n\n\t\t\tif (use_confidence_weights)\n\t\t\t\treturn mi;\n\t\t\telse\n\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t// (Eq. 7.6 of \"A Gentle Introduction to ReSTIR\")\n\t\t\t\t//\n\t\t\t\t// We also only want that division when not using confidence weights\n\t\t\t\treturn mi / (valid_neighbors_count + 1.0f);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\n\t\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\t\treturn mc + static_cast<float>(center_pixel_reservoir_M) / static_cast<float>(center_pixel_reservoir_M + valid_neighbors_M_sum);\n\t\t\t\telse\n\t\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t\t// (Eq. 
7.6 of \"A Gentle Introduction to ReSTIR\") so 'valid_neighbors_count + 1'\n\t\t\t\t\treturn (1 + mc) / (valid_neighbors_count + 1.0f);\n\t\t\t}\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tint reservoir_being_resampled_M, float reservoir_being_resampled_target_function,\n\t\tReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample, int center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\t\tReSTIRReservoirType<IsReSTIRGI>& neighbor_pixel_reservoir,\n\n\t\tReSTIRSurface& center_pixel_surface, float target_function_neighbor_sample_at_center,\n\t\tint neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resampling_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resampling_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = reservoir_being_resampled_target_function;\n\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? reservoir_being_resampled_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : valid_neighbors_count;\n\n\t\t\t// Eq. 
15 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(target_function_neighbor_sample_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi = difference_function * reservoir_resampled_M;\n\t\t\tfloat denom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal.unpack(), neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc = center_reservoir_M;\n\t\t\t\tfloat denom_mc = center_reservoir_M + neighbors_confidence_sum * symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\n\t\t\t\tfloat confidence_weights_multiplier;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t{\n\t\t\t\t\tif (neighbors_confidence_sum == 
0.0f)\n\t\t\t\t\t\tconfidence_weights_multiplier = 0.0f;\n\t\t\t\t\telse\n\t\t\t\t\t\tconfidence_weights_multiplier = reservoir_resampled_M / neighbors_confidence_sum;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = 1.0f / valid_neighbors_count;\n\n\t\t\t\tmc += confidence_weights_multiplier * nume_mc / denom_mc;\n\t\t\t}\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tint reservoir_being_resampled_M, float reservoir_being_resampled_target_function,\n\t\tReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample, int center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\t\tReSTIRReservoirType<IsReSTIRGI>& neighbor_pixel_reservoir,\n\n\t\tReSTIRSurface& center_pixel_surface, float target_function_neighbor_sample_at_center,\n\t\tint neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resampling_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resampling_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = 
reservoir_being_resampled_target_function;\n\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? reservoir_being_resampled_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : valid_neighbors_count;\n\n\t\t\t// Eq. 16 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(target_function_neighbor_sample_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi, denom_mi;\n\n\t\t\t// Eq. 
16 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tif (target_function_neighbor_sample_at_center <= target_function_neighbor_sample_at_neighbor)\n\t\t\t{\n\t\t\t\tnume_mi = difference_function * reservoir_resampled_M;\n\t\t\t\tdenom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tnume_mi = difference_function * reservoir_resampled_M;\n\t\t\t\tdenom_mi = center_reservoir_M + neighbors_confidence_sum;\n\t\t\t}\n\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal.unpack(), neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc, denom_mc;\n\n\t\t\t\tfloat difference_function_mc = symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\t\tif (target_function_center_sample_at_center <= target_function_center_sample_at_neighbor)\n\t\t\t\t{\n\t\t\t\t\tnume_mc = difference_function_mc * reservoir_resampled_M;\n\t\t\t\t\tdenom_mc = center_reservoir_M + 
neighbors_confidence_sum * difference_function_mc;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tnume_mc = difference_function_mc * reservoir_resampled_M;\n\t\t\t\t\tdenom_mc = center_reservoir_M + neighbors_confidence_sum;\n\t\t\t\t}\n\n\t\t\t\tmc += nume_mc / denom_mc;\n\t\t\t}\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\t//\n\t\t\t\t// This is Eq. 16 of the paper: y not in R: m_i(y) = 1 - Sum(...) / |R|\n\t\t\t\t// mc here is the sum\n\t\t\t\t// and |R| is 1\n\t\t\t\treturn 1.0f - mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/SpatialNormalizationWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIAL_NORMALIZATION_WEIGHT_H\n#define DEVICE_RESTIR_DI_SPATIAL_NORMALIZATION_WEIGHT_H\n\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight {};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tfloat final_reservoir_weight_sum, const ReSTIRSurface& center_pixel_surface,\n\t\tint2 center_pixel_coords, float& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0.0f)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\t// 1/M MIS weights are basically confidence weights only i.e. 
c_i / sum(c_j) with\n\t\t// c_i = r_i.M\n\n\t\tout_normalization_nume = 1.0f;\n\t\t// We're simply going to divide by the sum of all the M values of all the neighbors we resampled (including the center pixel)\n\t\t// so we're only going to set the denominator to that and the numerator isn't going to change\n\t\tout_normalization_denom = 0.0f;\n\n\t\tfor (int neighbor = 0; neighbor < ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data).reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, center_pixel_coords, random_number_generator);\n\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t// Neighbor out of the viewport\n\t\t\t\tcontinue;\n\n\t\t\tint center_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\tneighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface)))\n\t\t\t\tcontinue;\n\n\t\t\tout_normalization_denom += ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\t}\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& final_reservoir_sample, float final_reservoir_weight_sum,\n\t\tconst ReSTIRSurface& center_pixel_surface,\n\t\tint2 center_pixel_coords,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 
1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\t// Checking how many of our neighbors could have produced the sample that we just picked\n\t\t// and we're going to divide by the sum of M values of those neighbors\n\t\tout_normalization_denom = 0.0f;\n\t\tout_normalization_nume = 1.0f;\n\n\t\tint center_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\n\t\trandom_number_generator.m_state.seed = spatial_pass_settings.spatial_neighbors_rng_seed;\n\n\t\tfor (int neighbor = 0; neighbor < spatial_pass_settings.reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, center_pixel_coords, random_number_generator);\n\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t// Invalid neighbor\n\t\t\t\tcontinue;\n\n\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\tneighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface)))\n\t\t\t\tcontinue;\n\n\t\t\t// Getting the surface data at the neighbor\n\t\t\tReSTIRSurface neighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\tfloat target_function_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\t\n\t\t\t\t// ReSTIR GI target function\n\n\t\t\t\tfloat jacobian = 1.0f;\n\t\t\t\tif (!final_reservoir_sample.is_envmap_path())\n\t\t\t\t\tjacobian = get_jacobian_determinant_reconnection_shift(final_reservoir_sample.sample_point, final_reservoir_sample.sample_point_geometric_normal, center_pixel_surface.shading_point, neighbor_surface.shading_point, 
render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\ttarget_function_at_neighbor = jacobian * ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility, true>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\n\t\t\tif (target_function_at_neighbor > 0.0f)\n\t\t\t\t// If the neighbor could have produced this sample...\n\t\t\t\tout_normalization_denom += ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\t}\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& final_reservoir_sample, float final_reservoir_weight_sum, \n\t\tconst ReSTIRSurface& center_pixel_surface,\n\t\tint selected_neighbor,\n\t\tint2 center_pixel_coords,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\tout_normalization_denom = 0.0f;\n\t\tout_normalization_nume = 0.0f;\n\n\t\trandom_number_generator.m_state.seed = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data).spatial_neighbors_rng_seed;\n\n\t\tfor (int neighbor = 0; neighbor < ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data).reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\tint neighbor_pixel_index = 
get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, center_pixel_coords, random_number_generator);\n\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t// Invalid neighbor\n\t\t\t\tcontinue;\n\n\t\t\tint center_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\tneighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface)))\n\t\t\t\tcontinue;\n\n\t\t\t// Getting the surface data at the neighbor\n\t\t\tReSTIRSurface neighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\tfloat target_function_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\n\t\t\t\tif (!final_reservoir_sample.is_envmap_path())\n\t\t\t\t\t// Applying the jacobian to get \"p_hat_from_i\"\n\t\t\t\t\ttarget_function_at_neighbor *= hippt::max(0.0f, get_jacobian_determinant_reconnection_shift(final_reservoir_sample.sample_point, final_reservoir_sample.sample_point_geometric_normal, center_pixel_surface.shading_point, neighbor_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold()));\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\n\t\t\tif (target_function_at_neighbor > 0.0f)\n\t\t\t{\n\t\t\t\tint M = 1;\n\t\t\t\tif 
(ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\t\tM = ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\n\t\t\t\tif (neighbor == selected_neighbor)\n\t\t\t\t\t// Not multiplying by M here, this was done already when resampling the sample if we\n\t\t\t\t\t// were using confidence weights\n\t\t\t\t\tout_normalization_nume = target_function_at_neighbor;\n\t\t\t\tout_normalization_denom += target_function_at_neighbor * M;\n\t\t\t};\n\t\t}\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors with balance heuristic MIS weights in the m_i terms\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled by the MIS weights when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled by the MIS weights when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct 
ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled by the MIS weights when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatialNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled by the MIS weights when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/SpatiotemporalMISWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIOTEMPORAL_MIS_WEIGHT_H\n#define DEVICE_RESTIR_DI_SPATIOTEMPORAL_MIS_WEIGHT_H\n\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n\n#define TEMPORAL_NEIGHBOR_ID 0\n\n/**\n * There are going to be many specialization of this structure, one for each bias correction mode\n * (RESTIR_DI_BIAS_CORRECTION_1_OVER_M, RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, ...)\n *\n * IsReSTIRGI is used to indicate whether the structure should be used to compute\n * for ReSTIR GI or for ReSTIR DI\n */\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI = false>\nstruct ReSTIRSpatiotemporalResamplingMISWeight {};\n\ntemplate <>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(int reservoir_being_resampled_M)\n\t{\n\t\treturn reservoir_being_resampled_M;\n\t}\n};\n\ntemplate <>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(int reservoir_being_resampled_M)\n\t{\n\t\treturn reservoir_being_resampled_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data, int reservoir_being_resampled_M)\n\t{\n\t\treturn ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights ? 
reservoir_being_resampled_M : 1;\n\t}\n}; \n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tfloat reservoir_being_resampled_UCW,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& reservoir_being_resampled_sample,\n\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\t\tint current_neighbor, int initial_candidates_M, int temporal_neighbor_M,\n\t\tint center_pixel_index, int2 temporal_neighbor_coords,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (reservoir_being_resampled_UCW <= 0.0f)\n\t\t\t// Reservoir that doesn't contain any sample, returning \n\t\t\t// 1.0f MIS weight so that multiplying by that doesn't do anything\n\t\t\treturn 1.0f;\n\n\t\tfloat nume = 0.0f;\n\t\tfloat denom = 0.0f;\n\n\t\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t\tfor (int j = 0; j < spatial_pass_settings.reuse_neighbor_count + 1; j++)\n\t\t{\n\t\t\t// The last iteration of the loop is a special case that resamples the initial candidates reservoir\n\t\t\t// and so neighbor_pixel_index is never going to be used so we don't need to set it\n\t\t\tint neighbor_index_j;\n\t\t\tif (j != spatial_pass_settings.reuse_neighbor_count)\n\t\t\t{\n\t\t\t\tneighbor_index_j = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, j, temporal_neighbor_coords, random_number_generator);\n\t\t\t\tif (neighbor_index_j == -1)\n\t\t\t\t\t// Invalid neighbor, skipping\n\t\t\t\t\tcontinue;\n\n\t\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\t\tneighbor_index_j, center_pixel_index, \n\t\t\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface), 
render_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\t\t\t// Neighbor too dissimilar according to heuristics, skipping\n\t\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\tReSTIRSurface neighbor_surface;\n\t\t\tif (j == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\tneighbor_surface = center_pixel_surface;\n\t\t\telse\n\t\t\t\tneighbor_surface = get_pixel_surface(render_data, neighbor_index_j, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\t\t\tfloat target_function_at_j;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_at_j = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_j = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, neighbor_surface, random_number_generator);\n\n\t\t\tint M = 1;\n\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t{\n\t\t\t\tif (j == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\t\tM = initial_candidates_M;\n\t\t\t\telse\n\t\t\t\t\tM = ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_index_j);\n\t\t\t}\n\n\t\t\tdenom += target_function_at_j * M;\n\t\t\t// Using + 1 here because for the spatial neighbors, we want to start at index 1,\n\t\t\t// not 0 because it is the temporal neighbor that has index 0\n\t\t\tif (j + 1 == current_neighbor)\n\t\t\t\tnume = target_function_at_j * M;\n\t\t}\n\n\t\t// Taking the temporal neighbor into account\n\t\tfloat target_function_at_temporal_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, temporal_neighbor_surface, random_number_generator);\n\t\tint M = 
ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights ? temporal_neighbor_M : 1;\n\n\t\tdenom += target_function_at_temporal_neighbor * M;\n\t\tif (current_neighbor == TEMPORAL_NEIGHBOR_ID)\n\t\t\tnume = target_function_at_temporal_neighbor * M;\n\n\t\tif (denom == 0.0f)\n\t\t\treturn 0.0f;\n\t\telse\n\t\t\treturn nume / denom;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint reservoir_being_resampled_M, float reservoir_being_resampled_target_function,\n\t\tint center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample,\n\n\t\tfloat target_function_at_center, int neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resample_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resample_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\t// The target function of the neighbor reservoir's sample at the neighbor surface is just\n\t\t\t// the target function stored in the neighbor's reservoir.\n\t\t\t//\n\t\t\t// Care must be taken however because this is not necessarily true anymore after multiple spatial\n\t\t\t// reuse passes: a given pixel may now hold a sample from another pixel and that means that the visibility\n\t\t\t// doesn't match anymore.\n\t\t\t//\n\t\t\t// However, this ReSTIR DI implementation does a visibility reuse pass at the end of each spatial reuse pass\n\t\t\t// so that we know that the visibility is correct and thus we do not run into any issues and we can just\n\t\t\t// reuse the target function stored in the neighbor's reservoir\n\t\t\tfloat target_function_at_neighbor = reservoir_being_resampled_target_function;\n\n\t\t\tbool use_confidence_weights = 
ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? reservoir_being_resampled_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : 1;\n\t\t\t// We only want to divide by M-1 if we're not using confidence weights.\n\t\t\t// (Eq. 7.6 and 7.7 of \"A Gentle Introduction to ReSTIR\")\n\t\t\tfloat valid_neighbor_division_term = use_confidence_weights ? 1 : valid_neighbors_count;\n\n\t\t\tfloat nume = target_function_at_neighbor * reservoir_resampled_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\tfloat mi = denom == 0.0f ? 0.0f : (nume / denom);\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\t\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\t\tfloat nume_mc = target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat denom_mc = 
target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\n\t\t\t\t// (Eq. 7.7 of \"A Gentle Introduction to ReSTIR\"), c_j / (Sum_{k!=c}^M c_k)\n\t\t\t\tfloat confidence_weights_multiplier = use_confidence_weights ? reservoir_resampled_M / neighbors_confidence_sum : 1;\n\t\t\t\tif (denom_mc != 0.0f)\n\t\t\t\t\tmc += nume_mc / denom_mc / valid_neighbor_division_term * confidence_weights_multiplier;\n\t\t\t}\n\n\t\t\treturn mi / valid_neighbor_division_term;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! 
This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint resampled_reservoir_M, float resampled_reservoir_target_function,\n\t\tint center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample,\n\n\t\tfloat target_function_at_center, int neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resample_canonical, \n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resample_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\t// The target function of the neighbor reservoir's sample at the neighbor surface is just\n\t\t\t// the target function stored in the neighbor's reservoir.\n\t\t\t//\n\t\t\t// Care must be taken however because this is not necessarily true anymore after multiple spatial\n\t\t\t// reuse passes: a given pixel may now hold a sample from another pixel and that means that the visibility\n\t\t\t// doesn't match anymore.\n\t\t\t//\n\t\t\t// However, this ReSTIR DI implementation does a visibility reuse pass at the end of each spatial reuse pass\n\t\t\t// so that we know that the visibility is correct and thus we do not run into any issues and we can just\n\t\t\t// reuse the target function stored in the neighbor's reservoir\n\t\t\tfloat target_function_at_neighbor = resampled_reservoir_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? 
resampled_reservoir_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : 1;\n\t\t\t// We only want to divide by M-1 if we're not using confidence weights.\n\t\t\t// (Eq. 7.6 and 7.7 of \"A Gentle Introduction to ReSTIR\")\n\t\t\tfloat valid_neighbor_division_term = use_confidence_weights ? 1 : valid_neighbors_count;\n\n\t\t\tfloat nume = target_function_at_neighbor * reservoir_resampled_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\tfloat mi = 0.0f;\n\t\t\tif (denom != 0.0f)\n\t\t\t\tmi = nume / denom;\n\t\t\tif (use_confidence_weights)\n\t\t\t\tmi *= neighbors_confidence_sum / (neighbors_confidence_sum + center_reservoir_M);\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\t// There's one case where we do not need to update 'mc': when the center pixel (that we're currently resampling) is empty: M = 0 / UCW = 0\n\t\t\t\t// That's because is such cases, the empty reservoir will not be resampled into the final reservoir anyways since it has no contribution\n\t\t\t\t// Because 'mc' is only used as the MIS weight of the center reservoir, we don't care about 'mc' since the center reservoir is not going\n\t\t\t\t// to be chosen anyways\n\t\t\t\t//\n\t\t\t\t// So we can avoid computing all that stuff\n\n\t\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\t\t// TODO are we loading this surface again where the caller had it already?\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t\t// ReSTIR GI target 
function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc = target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat denom_mc = target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center / valid_neighbor_division_term * center_reservoir_M;\n\t\t\t\tfloat confidence_multiplier = 1.0f;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t\tconfidence_multiplier = reservoir_resampled_M / (center_reservoir_M + neighbors_confidence_sum);\n\t\t\t\tif (denom_mc != 0.0f)\n\t\t\t\t\tmc += nume_mc / denom_mc * confidence_multiplier;\n\t\t\t}\n\n\t\t\tif (use_confidence_weights)\n\t\t\t\treturn mi;\n\t\t\telse\n\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t// (Eq. 7.6 of \"A Gentle Introduction to ReSTIR\")\n\t\t\t\t//\n\t\t\t\t// We also only want that division when not using confidence weights\n\t\t\t\treturn mi / (valid_neighbors_count + 1.0f);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! 
This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\n\t\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\t\treturn mc + static_cast<float>(center_pixel_reservoir_M) / static_cast<float>(center_pixel_reservoir_M + valid_neighbors_M_sum);\n\t\t\t\telse\n\t\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t\t// (Eq. 7.6 of \"A Gentle Introduction to ReSTIR\") so 'valid_neighbors_count + 1'\n\t\t\t\t\treturn (1 + mc) / (valid_neighbors_count + 1.0f);\n\t\t\t}\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint resampled_reservoir_M, float resampled_reservoir_target_function,\n\t\tint center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample,\n\t\tconst ReSTIRSurface& center_pixel_surface,\n\n\t\tfloat target_function_at_center, int neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resample_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resample_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = resampled_reservoir_target_function;\n\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? resampled_reservoir_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? 
center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : valid_neighbors_count;\n\n\t\t\t// Eq. 15 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(target_function_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi = difference_function * reservoir_resampled_M;\n\t\t\tfloat denom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal, neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc = center_reservoir_M;\n\t\t\t\tfloat denom_mc = center_reservoir_M + neighbors_confidence_sum * symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\n\t\t\t\tfloat confidence_weights_multiplier;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t{\n\t\t\t\t\tif (neighbors_confidence_sum == 0.0f)\n\t\t\t\t\t\tconfidence_weights_multiplier 
= 0.0f;\n\t\t\t\t\telse\n\t\t\t\t\t\tconfidence_weights_multiplier = reservoir_resampled_M / neighbors_confidence_sum;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = 1.0f / valid_neighbors_count;\n\n\t\t\t\tmc += confidence_weights_multiplier * nume_mc / denom_mc;\n\t\t\t}\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tint resampled_reservoir_M, float resampled_reservoir_target_function,\n\t\tint center_pixel_reservoir_M, float center_pixel_reservoir_target_function,\n\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& center_pixel_reservoir_sample,\n\t\tconst ReSTIRSurface& center_pixel_surface,\n\n\t\tfloat target_function_at_center, int neighbor_pixel_index, int valid_neighbors_count, int valid_neighbors_M_sum,\n\t\tbool update_mc, bool resample_canonical,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (!resample_canonical)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = resampled_reservoir_target_function;\n\t\t\tfloat target_function_center_sample_at_center = center_pixel_reservoir_target_function;\n\n\t\t\tbool 
use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat reservoir_resampled_M = use_confidence_weights ? resampled_reservoir_M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? center_pixel_reservoir_M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? valid_neighbors_M_sum : valid_neighbors_count;\n\n\t\t\t// Eq. 15 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(target_function_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi = difference_function * reservoir_resampled_M;\n\t\t\tfloat denom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tif (update_mc)\n\t\t\t{\n\t\t\t\tReSTIRSurface neighbor_pixel_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t{\n\t\t\t\t\t// ReSTIR GI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t\t{\n\t\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\t\tif (!center_pixel_reservoir_sample.is_envmap_path())\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(center_pixel_reservoir_sample.sample_point, center_pixel_reservoir_sample.sample_point_geometric_normal, neighbor_pixel_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\t\telse\n\t\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\t// ReSTIR DI target function\n\t\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, center_pixel_reservoir_sample, neighbor_pixel_surface, random_number_generator);\n\n\t\t\t\tfloat nume_mc = center_reservoir_M;\n\t\t\t\tfloat denom_mc = center_reservoir_M + neighbors_confidence_sum * symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\n\t\t\t\tfloat confidence_weights_multiplier;\n\t\t\t\tif (use_confidence_weights)\n\t\t\t\t{\n\t\t\t\t\tif (neighbors_confidence_sum == 0.0f)\n\t\t\t\t\t\tconfidence_weights_multiplier 
= 0.0f;\n\t\t\t\t\telse\n\t\t\t\t\t\tconfidence_weights_multiplier = reservoir_resampled_M / neighbors_confidence_sum;\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = 1.0f / valid_neighbors_count;\n\n\t\t\t\tmc += confidence_weights_multiplier * nume_mc / denom_mc;\n\t\t\t}\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\n#endif"
  },
  {
    "path": "src/Device/includes/ReSTIR/SpatiotemporalNormalizationWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIOTEMPORAL_NORMALIZATION_WEIGHT_H\n#define DEVICE_RESTIR_DI_SPATIOTEMPORAL_NORMALIZATION_WEIGHT_H\n\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\"\n#include \"Device/includes/ReSTIR/NeighborSimilarity.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n\n#define TEMPORAL_NEIGHBOR_ID 0\n\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight {};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tfloat final_reservoir_weight_sum, int initial_candidates_reservoir_M,\n\t\tconst ReSTIRSurface& center_pixel_surface,\n\t\tint temporal_neighbor_M, int center_pixel_index, int2 temporal_neighbor_coords,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0.0f)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\t// 1/M MIS weights are basically confidence weights only i.e. 
c_i / sum(c_j) with\n\t\t// c_i = r_i.M\n\n\t\tout_normalization_nume = 1.0f;\n\t\t// We're simply going to divide by the sum of all the M values of all the neighbors we resampled (including the center pixel)\n\t\t// so we're only going to set the denominator to that and the numerator isn't going to change\n\t\tout_normalization_denom = 0.0f;\n\n\t\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t\tfor (int neighbor = 0; neighbor < spatial_pass_settings.reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\t// The last iteration of the loop is a special case that resamples the initial candidates reservoir\n\t\t\t// and so neighbor_pixel_index is never going to be used so we don't need to set it\n\t\t\tint neighbor_pixel_index;\n\t\t\tif (neighbor != spatial_pass_settings.reuse_neighbor_count)\n\t\t\t{\n\t\t\t\tneighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, temporal_neighbor_coords, random_number_generator);\n\n\t\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t\t// Neighbor out of the viewport\n\t\t\t\t\tcontinue;\n\n\t\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\t\tneighbor_pixel_index, center_pixel_index, \n\t\t\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface), render_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Getting the surface data at the neighbor\n\t\t\t// \n\t\t\t// The surface at the center pixel passed in parameters is \n\t\t\t// the surface in the current frame, that's what we want\n\t\t\t// since we're resampling initial candidates of the current\n\t\t\t// frame in the center pixel. 
We're not resampling the center\n\t\t\t// pixel from the previous frame so we need the current surface \n\t\t\tReSTIRSurface neighbor_surface;\n\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\tneighbor_surface = center_pixel_surface;\n\t\t\telse\n\t\t\t\tneighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\tout_normalization_denom += initial_candidates_reservoir_M;\n\t\t\telse\n\t\t\t\tout_normalization_denom += ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\t}\n\n\t\t// The fused spatiotemporal pass also resamples a temporal neighbor so we add the M of that neighbor too\n\t\tout_normalization_denom += temporal_neighbor_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& final_reservoir_sample, float final_reservoir_weight_sum, \n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\t\tint center_pixel_M, int temporal_neighbor_M, int center_pixel_index, int2 temporal_neighbor_position, float& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\t// Checking how many of our neighbors could have produced the sample that we just picked\n\t\t// and we're going to divide by the sum of M values of those neighbors\n\t\tout_normalization_denom = 0.0f;\n\t\tout_normalization_nume = 1.0f;\n\n\t\tconst 
ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t\tfor (int neighbor = 0; neighbor < spatial_pass_settings.reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\t// The last iteration of the loop is a special case that resamples the initial candidates reservoir\n\t\t\t// and so neighbor_pixel_index is never going to be used so we don't need to set it\n\t\t\tint neighbor_pixel_index;\n\t\t\tif (neighbor != spatial_pass_settings.reuse_neighbor_count)\n\t\t\t{\n\t\t\t\tneighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, temporal_neighbor_position, random_number_generator);\n\n\t\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t\t// Invalid neighbor\n\t\t\t\t\tcontinue;\n\n\t\t\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\t\tneighbor_pixel_index, center_pixel_index, \n\t\t\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface), render_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\t\t\tcontinue;\n\t\t\t}\n\n\n\t\t\t// Getting the surface data at the neighbor\n\t\t\t// \n\t\t\t// The surface at the center pixel passed in parameters is \n\t\t\t// the surface in the current frame, that's what we want\n\t\t\t// since we're resampling initial candidates of the current\n\t\t\t// frame in the center pixel. 
We're not resampling the center\n\t\t\t// pixel from the previous frame so we need the current surface \n\t\t\tReSTIRSurface neighbor_surface;\n\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\tneighbor_surface = center_pixel_surface;\n\t\t\telse\n\t\t\t\tneighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\t\t\tfloat target_function_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\n\t\t\tif (target_function_at_neighbor > 0.0f)\n\t\t\t{\n\t\t\t\t// If the neighbor could have produced this sample...\n\n\t\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\t\tout_normalization_denom += center_pixel_M;\n\t\t\t\telse\n\t\t\t\t\tout_normalization_denom += ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\t\t}\n\t\t}\n\n\t\t// Also taking the temporal neighbor into account which\n\t\tbool target_function_at_neighbor_gt_0;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\t// ReSTIR GI target function\n\t\t\ttarget_function_at_neighbor_gt_0 = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator) > 0.0f;\n\t\telse\n\t\t\t// ReSTIR DI target function\n\t\t\ttarget_function_at_neighbor_gt_0 = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, 
temporal_neighbor_surface, random_number_generator) > 0.0f;\n\n\t\tif (target_function_at_neighbor_gt_0)\n\t\t\tout_normalization_denom += temporal_neighbor_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& final_reservoir_sample, float final_reservoir_weight_sum, \n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\t\tint selected_neighbor,\n\t\tint center_pixel_M, int temporal_neighbor_M, int center_pixel_index, int2 temporal_neighbor_coords,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\tout_normalization_denom = 0.0f;\n\t\tout_normalization_nume = 0.0f;\n\n\t\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t\tfor (int neighbor = 0; neighbor < spatial_pass_settings.reuse_neighbor_count + 1; neighbor++)\n\t\t{\n\t\t\t// The last iteration of the loop is a special case that resamples the initial candidates reservoir\n\t\t\t// and so neighbor_pixel_index is never going to be used so we don't need to set it\n\t\t\tint neighbor_pixel_index;\n\t\t\tif (neighbor != spatial_pass_settings.reuse_neighbor_count)\n\t\t\t{\n\t\t\t\tneighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor, temporal_neighbor_coords, random_number_generator);\n\n\t\t\t\tif (neighbor_pixel_index == -1)\n\t\t\t\t\t// Invalid neighbor\n\t\t\t\t\tcontinue;\n\n\t\t\t\tif 
(!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\t\t\tneighbor_pixel_index, center_pixel_index, \n\t\t\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface), render_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t\t// Getting the surface data at the neighbor\n\t\t\t// \n\t\t\t// The surface at the center pixel passed in parameters is\n\t\t\t// the surface in the current frame, that's what we want\n\t\t\t// since we're resampling initial candidates of the current\n\t\t\t// frame in the center pixel. We're not resampling the center\n\t\t\t// pixel from the previous frame so we need the current surface\n\t\t\tReSTIRSurface neighbor_surface;\n\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\tneighbor_surface = center_pixel_surface;\n\t\t\telse\n\t\t\t\tneighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, random_number_generator);\n\n\t\t\tfloat target_function_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, neighbor_surface, random_number_generator);\n\n\t\t\tif (target_function_at_neighbor > 0.0f)\n\t\t\t{\n\t\t\t\t// If the neighbor could have produced this sample...\n\n\t\t\t\tint M = 1;\n\t\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\t{\n\t\t\t\t\tif (neighbor == spatial_pass_settings.reuse_neighbor_count)\n\t\t\t\t\t\tM = center_pixel_M;\n\t\t\t\t\telse\n\t\t\t\t\t\tM = 
ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\t\t\t}\n\n\t\t\t\t// neighbor + 1 here because 0 is the temporal neighbor, not the first spatial neighbor\n\t\t\t\tif (neighbor + 1 == selected_neighbor)\n\t\t\t\t\t// Not multiplying by M here, this was done already when resampling the sample if we\n\t\t\t\t\t// were using confidence weights\n\t\t\t\t\tout_normalization_nume += target_function_at_neighbor;\n\t\t\t\tout_normalization_denom += target_function_at_neighbor * M;\n\t\t\t};\n\t\t}\n\n\t\t// Now handling the temporal neighbor\n\t\tfloat target_function_at_temporal_neighbor;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\t// ReSTIR GI target function\n\t\t\ttarget_function_at_temporal_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\t\telse\n\t\t\t// ReSTIR DI target function\n\t\t\ttarget_function_at_temporal_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\n\t\tif (selected_neighbor == TEMPORAL_NEIGHBOR_ID)\n\t\t\tout_normalization_nume += target_function_at_temporal_neighbor;\n\n\t\tint temporal_M = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights ? 
temporal_neighbor_M : 1;\n\t\tout_normalization_denom += target_function_at_temporal_neighbor * temporal_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors with balance heuristic MIS weights in the m_i terms\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSpatiotemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, 
IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the neighbors\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/Surface.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SURFACE_H\n#define DEVICE_RESTIR_DI_SURFACE_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\nstruct ReSTIRSurface\n{\n\tDeviceUnpackedEffectiveMaterial material;\n\tRayVolumeState ray_volume_state;\n\tint primitive_index;\n\n\t// Do we need the view direction here? We can probably reconstruct it\n\tfloat3 view_direction = { 0.0f, 0.0f, 0.0f};\n\tfloat3 shading_normal = { 0.0f, 0.0f, 0.0f};\n\tfloat3 geometric_normal = { 0.0f, 0.0f, 0.0f};\n\tfloat3 shading_point = { 0.0f, 0.0f, 0.0f };\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRSurface get_pixel_surface(const HIPRTRenderData& render_data, int pixel_index, Xorshift32Generator& random_number_generator)\n{\n\tReSTIRSurface surface;\n\n\tsurface.material = render_data.g_buffer.materials[pixel_index].unpack();\n\tsurface.primitive_index = render_data.g_buffer.first_hit_prim_index[pixel_index];\n\tsurface.ray_volume_state.reconstruct_first_hit(\n\t\tsurface.material,\n\t\trender_data.buffers.material_indices,\n\t\tsurface.primitive_index,\n\t\trandom_number_generator);\n\n\tsurface.view_direction = render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n\tsurface.shading_normal = render_data.g_buffer.shading_normals[pixel_index].unpack();\n\tsurface.geometric_normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n\tsurface.shading_point = render_data.g_buffer.primary_hit_position[pixel_index];\n\n\treturn surface;\n}\n\n/**\n * Returns the surface at a pixel in the previous frame (so before the camera moved if it is in motion)\n * This is needed for unbiasedness in motion in the temporal reuse pass because when we count the neighbors\n * that could have produced the sample that we picked, we need to consider the neighbors at their previous 
positions,\n * not the current so we need to read in the last frame's g-buffer.\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRSurface get_pixel_surface_previous_frame(const HIPRTRenderData& render_data, int pixel_index, Xorshift32Generator& random_number_generator)\n{\n\tReSTIRSurface surface;\n\n\tsurface.material = render_data.g_buffer_prev_frame.materials[pixel_index].unpack();\n\tsurface.primitive_index = render_data.g_buffer_prev_frame.first_hit_prim_index[pixel_index];\n\tsurface.ray_volume_state.reconstruct_first_hit(\n\t\tsurface.material,\n\t\trender_data.buffers.material_indices,\n\t\tsurface.primitive_index,\n\t\trandom_number_generator);\n\n\tsurface.view_direction = render_data.g_buffer_prev_frame.get_view_direction(render_data.prev_camera.position, pixel_index);\n\tsurface.shading_normal = render_data.g_buffer_prev_frame.shading_normals[pixel_index].unpack();\n\tsurface.geometric_normal = render_data.g_buffer_prev_frame.geometric_normals[pixel_index].unpack();\n\tsurface.shading_point = render_data.g_buffer_prev_frame.primary_hit_position[pixel_index];\n\n\treturn surface;\n}\n\n/**\n * Simple overload of the function to base the 'previous_frame' decision on a boolean instead of on the name of the function\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRSurface get_pixel_surface(const HIPRTRenderData& render_data, int pixel_index, bool previous_frame, Xorshift32Generator& random_number_generator)\n{\n\tif (previous_frame)\n\t\treturn get_pixel_surface_previous_frame(render_data, pixel_index, random_number_generator);\n\telse\n\t\treturn get_pixel_surface(render_data, pixel_index, random_number_generator);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/TemporalMISWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_MIS_WEIGHT_H\n#define DEVICE_RESTIR_DI_MIS_WEIGHT_H\n\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/MISWeightsCommon.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n\n // By convention, the temporal neighbor is the first one to be resampled in for loops \n // (for looping over the neighbors when resampling / computing MIS weights)\n // So instead of hardcoding 0 everywhere in the code, we just basically give it a name\n // with a #define\n#define TEMPORAL_NEIGHBOR_ID 0\n// Same when resampling the initial candidates\n#define INITIAL_CANDIDATES_ID 1\n\n/**\n * This structure here is only meant to encapsulate one method that\n * returns the resampling MIS weight used by the temporal resampling pass.\n * \n * This whole file basically defines the functions to compute the different resampling\n * MIS weights that the renderer supports.\n * \n * This is cleaner that having a single function with a ton of \n * \n * #if BiasCorrectionmode == 1_OVER_M\n * #elif BiasCorrectionmode == 1_OVER_Z\n * #elif BiasCorrectionmode == MIS_LIKE\n * ....\n * \n * We now have one structure per MIS weight computation mode instead of one #if / #elif\n */\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight {};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const ReSTIRDIReservoir& reservoir_being_resampled)\n\t{\n\t\t// 1/M MIS Weights are basically confidence weights only so we only need to return\n\t\t// the confidence of the reservoir\n\n\t\treturn 
reservoir_being_resampled.M;\n\t}\n};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const ReSTIRDIReservoir& reservoir_being_resampled)\n\t{\n\t\t// 1/Z MIS Weights are basically confidence weights only so we only need to return\n\t\t// the confidence of the reservoir. The difference with 1/M weights is how we're going\n\t\t// to normalize the reservoir at the end of the temporal/spatial resampling pass\n\n\t\treturn reservoir_being_resampled.M;\n\t}\n};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data, const ReSTIRDIReservoir& reservoir_being_resampled)\n\t{\n\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t{\n\t\t\t// MIS-like MIS weights with confidence weights are basically a mix of 1/Z \n\t\t\t// and MIS like for the normalization so we're just returning the confidence here\n\t\t\t// so that a reservoir that is being resampled gets a bigger weight depending on its \n\t\t\t// confidence weight (M).\n\n\t\t\treturn reservoir_being_resampled.M;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// MIS-like MIS weights without confidence weights do not weight the neighbor reservoirs\n\t\t\t// during resampling. 
We're thus returning 1.0f.\n\t\t\t// \n\t\t\t// The bulk of the work of the MIS-like weights is done during the normalization of the reservoir\n\n\t\t\treturn 1.0f;\n\t\t}\n\t}\n};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& reservoir_being_resampled_sample,\n\t\tfloat initial_candidates_reservoir_M,\n\n\t\tReSTIRSurface& temporal_neighbor_surface, ReSTIRSurface& center_pixel_surface,\n\t\tint temporal_neighbor_reservoir_M,\n\t\tint current_neighbor_index,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tfloat nume = 0.0f;\n\t\t// We already have the target function at the center pixel, adding it to the denom\n\t\tfloat denom = 0.0f;\n\n\t\t// Evaluating the sample that we're resampling at the neighbor locations (using the neighbors surfaces)\n\t\tfloat target_function_at_temporal_neighbor = 0.0f;\n\t\tif (temporal_neighbor_reservoir_M != 0)\n\t\t{\n\t\t\t// Only computing the target function if we do have a temporal neighbor\n\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_at_temporal_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, temporal_neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_at_temporal_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, temporal_neighbor_surface, random_number_generator);\n\t\t}\n\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID && target_function_at_temporal_neighbor == 0.0f)\n\t\t\t// If we're currently computing the MIS weight for the temporal neighbor,\n\t\t\t// this means that we're going to have the temporal neighbor weight 
\n\t\t\t// (target function) in the numerator. But if the target function\n\t\t\t// at the temporal neighbor is 0.0f, then we're going to have 0.0f\n\t\t\t// in the numerator --> 0.0f MIS weight anyways --> no need to\n\t\t\t// compute anything else, we can already return 0.0f for the MIS weight.\n\t\t\treturn 0.0f;\n\n\t\tfloat target_function_at_center;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\t// ReSTIR GI target function\n\t\t\ttarget_function_at_center = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, center_pixel_surface, random_number_generator);\n\t\telse\n\t\t\t// ReSTIR DI target function\n\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, reservoir_being_resampled_sample, center_pixel_surface, random_number_generator);\n\n\t\tint temporal_M = temporal_neighbor_reservoir_M;\n\t\tint center_reservoir_M = initial_candidates_reservoir_M;\n\t\tif (!ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t{\n\t\t\ttemporal_M = 1;\n\t\t\tcenter_reservoir_M = 1;\n\t\t}\n\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID)\n\t\t\tnume = target_function_at_temporal_neighbor * temporal_M;\n\t\telse\n\t\t\tnume = target_function_at_center * center_reservoir_M;\n\n\t\tdenom = target_function_at_temporal_neighbor * temporal_M + target_function_at_center * center_reservoir_M;\n\n\t\tif (denom == 0.0f)\n\t\t\treturn 0.0f;\n\t\telse\n\t\t\treturn nume / denom;\n\t}\n};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tReSTIRReservoirType<IsReSTIRGI>& temporal_neighbor_reservoir,\n\t\tReSTIRReservoirType<IsReSTIRGI>& initial_candidates_reservoir,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& 
temporal_neighbor_surface,\n\n\t\tfloat neighbor_sample_target_function_at_center, int current_neighbor_index,\n\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID)\n\t\t{\n\t\t\t// Resampling the temporal neighbor\n\n\t\t\tfloat target_function_at_neighbor = temporal_neighbor_reservoir.sample.target_function;\n\t\t\tfloat target_function_at_center = neighbor_sample_target_function_at_center;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat temporal_neighbor_M = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? initial_candidates_reservoir.M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\n\t\t\tfloat nume = target_function_at_neighbor * temporal_neighbor_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center * center_reservoir_M;\n\t\t\tfloat mi = denom == 0.0f ? 0.0f : (nume / denom);\n\n\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t{\n\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\tif (!initial_candidates_reservoir.sample.is_envmap_path())\n\t\t\t\t\t{\n\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(initial_candidates_reservoir.sample.sample_point, initial_candidates_reservoir.sample.sample_point_geometric_normal.unpack(), temporal_neighbor_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\tfloat target_function_center_sample_at_center = initial_candidates_reservoir.sample.target_function;\n\n\t\t\tfloat nume_mc = target_function_center_sample_at_center * center_reservoir_M;\n\t\t\tfloat denom_mc = target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center * center_reservoir_M;\n\n\t\t\tfloat confidence_multiplier = 1.0f;\n\t\t\tif (use_confidence_weights)\n\t\t\t\tconfidence_multiplier = temporal_neighbor_M / neighbors_confidence_sum;\n\n\t\t\tif (denom_mc != 
0.0f)\n\t\t\t\tmc += nume_mc / denom_mc * confidence_multiplier;\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel (initial candidates)\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tReSTIRReservoirType<IsReSTIRGI>& temporal_neighbor_reservoir,\n\t\tReSTIRReservoirType<IsReSTIRGI>& initial_candidates_reservoir,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\n\t\tfloat neighbor_sample_target_function_at_center, int current_neighbor_index,\n\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID)\n\t\t{\n\t\t\t// Resampling the temporal neighbor\n\n\t\t\tfloat target_function_at_neighbor = temporal_neighbor_reservoir.sample.target_function;\n\t\t\tfloat target_function_at_center = neighbor_sample_target_function_at_center;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat temporal_neighbor_M = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? 
initial_candidates_reservoir.M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\n\t\t\tfloat nume = target_function_at_neighbor * temporal_neighbor_M;\n\t\t\tfloat denom = target_function_at_neighbor * neighbors_confidence_sum + target_function_at_center * center_reservoir_M;\n\t\t\tfloat mi = denom == 0.0f ? 0.0f : (nume / denom);\n\t\t\tif (use_confidence_weights)\n\t\t\t\t// Eq 7.8\n\t\t\t\tmi *= neighbors_confidence_sum / (neighbors_confidence_sum + center_reservoir_M);\n\n\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t{\n\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\tif (!initial_candidates_reservoir.sample.is_envmap_path())\n\t\t\t\t\t{\n\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(initial_candidates_reservoir.sample.sample_point, initial_candidates_reservoir.sample.sample_point_geometric_normal.unpack(), temporal_neighbor_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\tfloat target_function_center_sample_at_center = initial_candidates_reservoir.sample.target_function;\n\n\t\t\tfloat nume_mc = target_function_center_sample_at_center * center_reservoir_M;\n\t\t\tfloat denom_mc = target_function_center_sample_at_neighbor * neighbors_confidence_sum + target_function_center_sample_at_center * center_reservoir_M;\n\t\t\tfloat confidence_multiplier = 1.0f;\n\t\t\tif (use_confidence_weights)\n\t\t\t\tconfidence_multiplier = neighbors_confidence_sum / (neighbors_confidence_sum + 
center_reservoir_M);\n\n\t\t\tif (denom_mc != 0.0f)\n\t\t\t\tmc += nume_mc / denom_mc * confidence_multiplier;\n\n\t\t\tif (use_confidence_weights)\n\t\t\t\treturn mi;\n\t\t\telse\n\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t// (Eq. 7.6 of \"A Gentle Introduction to ReSTIR\")\n\t\t\t\t// And we only want to divide if not using confidence weights\n\t\t\t\t// \n\t\t\t\t// M = 2 (center reservoir + temporal reservoir)\n\t\t\t\treturn mi * 0.5f;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel (initial candidates)\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\n\t\t\t\t// In the defensive formulation, we want to divide by M, not M-1.\n\t\t\t\t// (Eq. 
7.6 of \"A Gentle Introduction to ReSTIR\")\n\t\t\t\tif (ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights)\n\t\t\t\t\treturn mc + static_cast<float>(initial_candidates_reservoir.M) / static_cast<float>(initial_candidates_reservoir.M + temporal_neighbor_reservoir.M);\n\t\t\t\telse\n\t\t\t\t\treturn (1.0f + mc) * 0.5f;\n\t\t\t}\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate<bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tReSTIRReservoirType<IsReSTIRGI>& temporal_neighbor_reservoir,\n\t\tReSTIRReservoirType<IsReSTIRGI>& initial_candidates_reservoir,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\n\t\tfloat neighbor_sample_target_function_at_center, int current_neighbor_index,\n\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID)\n\t\t{\n\t\t\t// Resampling the temporal neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = temporal_neighbor_reservoir.sample.target_function;\n\t\t\tfloat target_function_neighbor_sample_at_center = neighbor_sample_target_function_at_center;\n\n\t\t\tbool use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat temporal_neighbor_M = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? initial_candidates_reservoir.M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? temporal_neighbor_M : 1;\n\n\t\t\t// Eq. 
15 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(target_function_neighbor_sample_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi = difference_function * temporal_neighbor_M;\n\t\t\tfloat denom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t{\n\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\tif (!initial_candidates_reservoir.sample.is_envmap_path())\n\t\t\t\t\t{\n\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(initial_candidates_reservoir.sample.sample_point, initial_candidates_reservoir.sample.sample_point_geometric_normal.unpack(), temporal_neighbor_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\tfloat target_function_center_sample_at_center = initial_candidates_reservoir.sample.target_function;\n\n\t\t\tfloat nume_mc = center_reservoir_M;\n\t\t\tfloat denom_mc = center_reservoir_M + neighbors_confidence_sum * symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\n\t\t\tfloat confidence_weights_multiplier;\n\t\t\tif 
(use_confidence_weights)\n\t\t\t{\n\t\t\t\tif (neighbors_confidence_sum == 0.0f)\n\t\t\t\t\tconfidence_weights_multiplier = 0.0f;\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = temporal_neighbor_M / neighbors_confidence_sum;\n\t\t\t}\n\t\t\telse\n\t\t\t\tconfidence_weights_multiplier = 1.0f;\n\n\t\t\tmc += confidence_weights_multiplier * nume_mc / denom_mc;\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel (initial candidates)\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\treturn mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalResamplingMISWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE float get_resampling_MIS_weight(const HIPRTRenderData& render_data,\n\t\tReSTIRReservoirType<IsReSTIRGI>& temporal_neighbor_reservoir,\n\t\tReSTIRReservoirType<IsReSTIRGI>& initial_candidates_reservoir,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\n\t\tfloat neighbor_sample_target_function_at_center, int current_neighbor_index,\n\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (current_neighbor_index == TEMPORAL_NEIGHBOR_ID)\n\t\t{\n\t\t\t// Resampling a neighbor\n\n\t\t\tfloat target_function_neighbor_sample_at_neighbor = temporal_neighbor_reservoir.sample.target_function;\n\t\t\tfloat target_function_center_sample_at_center = initial_candidates_reservoir.sample.target_function;\n\n\t\t\tbool 
use_confidence_weights = ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).use_confidence_weights;\n\t\t\tfloat temporal_neighbor_M = use_confidence_weights ? temporal_neighbor_reservoir.M : 1;\n\t\t\tfloat center_reservoir_M = use_confidence_weights ? initial_candidates_reservoir.M : 1;\n\t\t\tfloat neighbors_confidence_sum = use_confidence_weights ? temporal_neighbor_M : 1;\n\n\t\t\t// Eq. 15 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tfloat difference_function = symmetric_ratio_MIS_weights_difference_function(neighbor_sample_target_function_at_center, target_function_neighbor_sample_at_neighbor, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tfloat nume_mi, denom_mi;\n\n\t\t\t// Eq. 16 of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, 2024] generalized\n\t\t\t// with confidence weights\n\t\t\tif (neighbor_sample_target_function_at_center <= target_function_neighbor_sample_at_neighbor)\n\t\t\t{\n\t\t\t\tnume_mi = difference_function * temporal_neighbor_M;\n\t\t\t\tdenom_mi = center_reservoir_M + neighbors_confidence_sum * difference_function;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tnume_mi = difference_function * temporal_neighbor_M;\n\t\t\t\tdenom_mi = center_reservoir_M + neighbors_confidence_sum;\n\t\t\t}\n\n\t\t\tfloat mi = nume_mi / denom_mi;\n\n\t\t\tfloat target_function_center_sample_at_neighbor;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\t\t// Because we're using the target function as a PDF here, we need to scale the PDF\n\t\t\t\t// by the jacobian. That's p_hat_from_i, Eq. 
5.9 of \"A Gentle Introduction to ReSTIR\"\n\n\t\t\t\t// Only doing this if we at least have a target function to scale by the jacobian\n\t\t\t\tif (target_function_center_sample_at_neighbor > 0.0f)\n\t\t\t\t{\n\t\t\t\t\t// If this is an envmap path the jacobian is just 1 so this is not needed\n\t\t\t\t\tif (!initial_candidates_reservoir.sample.is_envmap_path())\n\t\t\t\t\t{\n\t\t\t\t\t\tfloat jacobian = get_jacobian_determinant_reconnection_shift(initial_candidates_reservoir.sample.sample_point, initial_candidates_reservoir.sample.sample_point_geometric_normal.unpack(), temporal_neighbor_surface.shading_point, center_pixel_surface.shading_point, render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\n\t\t\t\t\t\tif (jacobian == 0.0f)\n\t\t\t\t\t\t\t// Clamping at 0.0f so that if the jacobian returned is -1.0f (meaning that the jacobian doesn't match the threshold\n\t\t\t\t\t\t\t// and has been rejected), the target function is set to 0\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor = 0.0f;\n\t\t\t\t\t\telse\n\t\t\t\t\t\t\ttarget_function_center_sample_at_neighbor *= jacobian;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttarget_function_center_sample_at_neighbor = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, initial_candidates_reservoir.sample, temporal_neighbor_surface, random_number_generator);\n\n\t\t\tfloat nume_mc, denom_mc;\n\n\t\t\tfloat difference_function_mc = symmetric_ratio_MIS_weights_difference_function(target_function_center_sample_at_neighbor, target_function_center_sample_at_center, ReSTIRSettingsHelper::get_restir_settings<IsReSTIRGI>(render_data).symmetric_ratio_mis_weights_beta_exponent);\n\t\t\tif (target_function_center_sample_at_center <= target_function_center_sample_at_neighbor)\n\t\t\t{\n\t\t\t\tnume_mc = difference_function_mc * temporal_neighbor_M;\n\t\t\t\tdenom_mc = center_reservoir_M + 
neighbors_confidence_sum * difference_function_mc;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tnume_mc = difference_function_mc * temporal_neighbor_M;\n\t\t\t\tdenom_mc = center_reservoir_M + neighbors_confidence_sum;\n\t\t\t}\n\n\t\t\t/*float confidence_weights_multiplier;\n\t\t\tif (use_confidence_weights)\n\t\t\t{\n\t\t\t\tif (neighbors_confidence_sum == 0.0f)\n\t\t\t\t\tconfidence_weights_multiplier = 0.0f;\n\t\t\t\telse\n\t\t\t\t\tconfidence_weights_multiplier = reservoir_resampled_M / neighbors_confidence_sum;\n\t\t\t}\n\t\t\telse\n\t\t\t\tconfidence_weights_multiplier = 1.0f / valid_neighbors_count;*/\n\n\t\t\tmc += nume_mc / denom_mc;\n\n\t\t\treturn mi;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Resampling the center pixel\n\n\t\t\tif (mc == 0.0f)\n\t\t\t\t// If there was no neighbor resampling (and mc hasn't been accumulated),\n\t\t\t\t// then the MIS weight should be 1 for the center pixel. It gets all the weight\n\t\t\t\t// since no neighbor was resampled\n\t\t\t\treturn 1.0f;\n\t\t\telse\n\t\t\t\t// Returning the weight accumulated so far when resampling the neighbors.\n\t\t\t\t// \n\t\t\t\t// !!! This assumes that the center pixel is resampled last (which it is in this ReSTIR implementation) !!!\n\t\t\t\t//\n\t\t\t\t// This is Eq. 16 of the paper: y not in R: m_i(y) = 1 - Sum(...) / |R|\n\t\t\t\t// mc here is the sum\n\t\t\t\t// and |R| is 1\n\t\t\t\treturn 1.0f - mc;\n\t\t}\n\t}\n\n\t// Weight for the canonical sample (center pixel)\n\tfloat mc = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/TemporalNormalizationWeight.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_NORMALIZATION_WEIGHT_H\n#define DEVICE_RESTIR_DI_NORMALIZATION_WEIGHT_H\n\n#include \"Device/includes/ReSTIR/Utils.h\"\n\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n\n // By convention, the temporal neighbor is the first one to be resampled in for loops \n // (for looping over the neighbors when resampling / computing MIS weights)\n // So instead of hardcoding 0 everywhere in the code, we just basically give it a name\n // with a #define\n#define TEMPORAL_NEIGHBOR_ID 0\n// Same when resampling the initial candidates\n#define INITIAL_CANDIDATES_ID 1\n\n/**\n * This structure here is only meant to encapsulate one method that\n * returns the numerator and denominator for normalizing a reservoir at\n * the end of the temporal / spatial reuse pass.\n * \n * This is cleaner that having a single function with a ton of \n * \n * #if BiasCorrectionmode == 1_OVER_M\n * #elif BiasCorrectionmode == 1_OVER_Z\n * #elif BiasCorrectionmode == MIS_LIKE\n * ....\n * \n * We now have one structure per bias correction method one #if / #elif\n */\ntemplate <int BiasCorrectionMode, bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight {};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_M, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float final_reservoir_weight_sum,\n\t\tint initial_candidates_M, int temporal_neighbor_M,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\t// 1/M MIS weights are basically confidence weights only i.e. 
c_i / sum(c_j) with\n\t\t// c_i = r_i.M\n\n\t\tout_normalization_nume = 1.0f;\n\t\t// We're simply going to divide by the sum of all the M values of all the neighbors we resampled (including the center pixel)\n\t\t// so we're only going to set the denominator to that and the numerator isn't going to change\n\t\tout_normalization_denom = initial_candidates_M + temporal_neighbor_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_1_OVER_Z, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& final_reservoir_sample, float final_reservoir_weight_sum, \n\t\tint initial_candidates_M, int temporal_neighbor_M,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\tout_normalization_nume = 1.0f;\n\t\t// Checking how many of our neighbors could have produced the sample that we just picked\n\t\t// and we're going to divide by the sum of M values of those neighbors\n\t\tout_normalization_denom = 0.0f;\n\n\t\t// We're resampling from two reservoirs (the initial candidates and the temporal neighbor).\n\t\t// Either of these two reservoirs could have potentially produced the sample that we retained\n\t\t// in the 'reservoir' parameter.\n\t\t// \n\t\t// The question is: how many neighbors could have produced that sample?\n\t\t// The sample could have been produced by a neighbor if the target function of the neighbor with\n\t\t// that sample is > so we're going to check both target function here.\n\n\t\t// Evaluating the target function at the center pixel because this is the pixel of the 
initial candidates\n\t\tfloat center_pixel_target_function;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\t// ReSTIR GI target function\n\t\t\tcenter_pixel_target_function = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, center_pixel_surface, random_number_generator);\n\t\telse\n\t\t\t// ReSTIR DI target function\n\t\t\tcenter_pixel_target_function = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, center_pixel_surface, random_number_generator);\n\t\t\n\t\t// if the sample contained in our final reservoir (the 'reservoir' parameter) could have been produced by the center\n\t\t// pixel, we're adding the confidence of that pixel to the denominator for normalization\n\t\tout_normalization_denom += (center_pixel_target_function > 0) * initial_candidates_M;\n\n\t\tif (temporal_neighbor_M > 0)\n\t\t{\n\t\t\t// We only want to check if the temporal could have produced the sample if we actually have a temporal neighbor\n\t\t\tfloat temporal_neighbor_target_function;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttemporal_neighbor_target_function = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttemporal_neighbor_target_function = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\t\t\tout_normalization_denom += (temporal_neighbor_target_function > 0) * temporal_neighbor_M;\n\t\t}\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_LIKE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(const HIPRTRenderData& render_data,\n\t\tconst ReSTIRSampleType<IsReSTIRGI>& 
final_reservoir_sample, float final_reservoir_weight_sum,\n\t\tint initial_candidates_M, int temporal_neighbor_M,\n\t\tReSTIRSurface& center_pixel_surface, ReSTIRSurface& temporal_neighbor_surface,\n\t\tint selected_neighbor,\n\t\tfloat& out_normalization_nume, float& out_normalization_denom,\n\t\tXorshift32Generator& random_number_generator)\n\t{\n\t\tif (final_reservoir_weight_sum <= 0)\n\t\t{\n\t\t\t// Invalid reservoir, returning directly\n\t\t\tout_normalization_nume = 1.0f;\n\t\t\tout_normalization_denom = 1.0f;\n\n\t\t\treturn;\n\t\t}\n\n\t\tfloat center_pixel_target_function;\n\t\tif constexpr (IsReSTIRGI)\n\t\t\t// ReSTIR GI target function\n\t\t\tcenter_pixel_target_function = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, center_pixel_surface, random_number_generator);\n\t\telse\n\t\t\t// ReSTIR DI target function\n\t\t\tcenter_pixel_target_function = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, center_pixel_surface, random_number_generator);\n\n\t\tfloat temporal_neighbor_target_function = 0.0f;\n\t\tif (temporal_neighbor_M > 0)\n\t\t{\n\t\t\t// Only evaluating the target function if we actually have a temporal neighbor because if we don't,\n\t\t\t// this means that no temporal neighbor contributed to the resampling of the sample in 'reservoir'\n\t\t\t// and if the temporal neighbor didn't contribute to the resampling, then this is not, in MIS terms,\n\t\t\t// a sampling technique/strategy to take into account in the MIS weight\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t// ReSTIR GI target function\n\t\t\t\ttemporal_neighbor_target_function = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\t\t\telse\n\t\t\t\t// ReSTIR DI target function\n\t\t\t\ttemporal_neighbor_target_function = 
ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, final_reservoir_sample, temporal_neighbor_surface, random_number_generator);\n\t\t}\n\n\t\tif (selected_neighbor == INITIAL_CANDIDATES_ID)\n\t\t\t// The point of the MIS-like MIS weights is to have the weight of the sample that we picked\n\t\t\t// in the numerator and the sum of everyone in the denominator.\n\t\t\t//\n\t\t\t// So if this is the sample that we picked, we're putting its target function value in the numerator\n\t\t\t//\n\t\t\t// Not multiplying by M here because this is done already during the resampling (in the resampling MIS weights)\n\t\t\tout_normalization_nume = center_pixel_target_function;\n\t\telse\n\t\t\t// Otherwise, if the sample that we picked is from the temporal neighbor, then the temporal\n\t\t\t// neighbor's target function is the one in the numerator\n\t\t\tout_normalization_nume = temporal_neighbor_target_function;\n\n\t\tif (!render_data.render_settings.restir_di_settings.use_confidence_weights)\n\t\t{\n\t\t\t// If not using confidence weights, settings the weights to 1 so that everyone has the same weight\n\t\t\tinitial_candidates_M = 1;\n\t\t\ttemporal_neighbor_M = 1;\n\t\t}\n\n\t\tout_normalization_denom = center_pixel_target_function * initial_candidates_M + temporal_neighbor_target_function * temporal_neighbor_M;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_MIS_GBH, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the \n\t\t// neighbors with balance heuristic MIS weights in the m_i terms\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void 
get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the \n\t\t// neighbors. Everything is already in the MIS weights m_i.\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the \n\t\t// neighbors. Everything is already in the MIS weights m_i.\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the \n\t\t// neighbors. Everything is already in the MIS weights m_i.\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n}; \n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRTemporalNormalizationWeight<RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO, IsReSTIRGI>\n{\n\tHIPRT_HOST_DEVICE void get_normalization(float& out_normalization_nume, float& out_normalization_denom)\n\t{\n\t\t// Nothing more to normalize, everything is already handled when resampling the \n\t\t// neighbors. Everything is already in the MIS weights m_i.\n\t\tout_normalization_nume = 1.0f;\n\t\tout_normalization_denom = 1.0f;\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/Utils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_UTILS_H\n#define DEVICE_RESTIR_DI_UTILS_H \n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/NeighborSimilarity.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n\n/**\n * 'last_primitive_hit_index' is the index of the triangle we're currently sitting \n * on and that we're shooting a ray from. This is used to avoid self intersections.\n * \n * Returns true if the reservoir was killed, false otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool ReSTIR_DI_visibility_test_kill_reservoir(const HIPRTRenderData& render_data, ReSTIRDIReservoir& reservoir, float3 shading_point, int last_primitive_hit_index, Xorshift32Generator& random_number_generator)\n{\n\tif (reservoir.UCW <= 0.0f && reservoir.weight_sum <= 0.0f)\n\t\treturn false;\n\telse if (reservoir.sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED)\n\t\t// The sample is already unoccluded, no need to test for visibility\n\t\treturn false;\n\n\tfloat distance_to_light;\n\tfloat3 sample_direction;\n\tif (reservoir.sample.is_envmap_sample())\n\t{\n\t\tsample_direction = matrix_X_vec(render_data.world_settings.envmap_to_world_matrix, reservoir.sample.point_on_light_source);\n\t\tdistance_to_light = 1.0e35f;\n\t}\n\telse\n\t{\n\t\tsample_direction = reservoir.sample.point_on_light_source - shading_point;\n\t\tsample_direction /= (distance_to_light = hippt::length(sample_direction));\n\t}\n\n\thiprtRay shadow_ray;\n\tshadow_ray.origin = shading_point;\n\tshadow_ray.direction = sample_direction;\n\n\tbool visible = !evaluate_shadow_ray_occluded(render_data, 
shadow_ray, distance_to_light, last_primitive_hit_index, /* bounce. Always 0 for ReSTIR DI*/ 0, random_number_generator);\n\tif (!visible)\n\t{\n\t\t// Setting to -1 here so that we know when debugging that this is because of visibility reuse\n\t\treservoir.UCW = -1.0f;\n\n\t\treturn true;\n\t}\n\telse\n\t{\n\t\t// Visible so the sample is unoccluded\n\t\treservoir.sample.flags |= RESTIR_DI_FLAGS_UNOCCLUDED;\n\n\t\treturn false;\n\t}\n}\n\n/**\n * Tests the visibility of the sample contained in 'reservoir' from the given shading point and kills the reservoir\n * if the visibility is occluded\n * \n * Returns true if the reservoir was killed, false otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool ReSTIR_GI_visibility_validation(const HIPRTRenderData& render_data, ReSTIRGIReservoir& reservoir, float3 shading_point, int last_hit_primitive_index, Xorshift32Generator& random_number_generator)\n{\n\tif (reservoir.UCW <= 0.0f && reservoir.weight_sum <= 0.0f)\n\t\treturn false;\n\n\tfloat distance_to_sample_point;\n\tfloat3 sample_direction;\n\tif (reservoir.sample.is_envmap_path())\n\t{\n\t\t// For envmap path, the direction is stored in the 'sample_point' value\n\t\tsample_direction = reservoir.sample.sample_point;\n\t\tdistance_to_sample_point = 1.0e35f;\n\t}\n\telse\n\t{\n\t\t// Not an envmap path, the direction is the difference between the current shading\n\t\t// point and the reconnection point\n\t\tsample_direction = reservoir.sample.sample_point - shading_point;\n\t\tdistance_to_sample_point = hippt::length(sample_direction);\n\t\tif (distance_to_sample_point <= 1.0e-6f)\n\t\t{\n\t\t\t// To avoid numerical instabilities, killing the reservoir\n\t\t\treservoir.UCW = 0.0f;\n\n\t\t\treturn true;\n\t\t}\n\n\t\tsample_direction /= distance_to_sample_point;\n\t}\n\n\thiprtRay shadow_ray;\n\tshadow_ray.origin = shading_point;\n\tshadow_ray.direction = sample_direction;\n\n\tbool visible = !evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_sample_point, 
last_hit_primitive_index, \n\t\t/* bounce. Always 1 for ReSTIR GI from visible point to sample point */ 1, random_number_generator);\n\n\tif (!visible)\n\t{\n\t\t// Setting to 0 here so that we know when debugging that this is because of visibility reuse\n\t\treservoir.UCW = 0.0f;\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/UtilsSpatial.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_UTILS_SPATIAL_H\n#define DEVICE_RESTIR_UTILS_SPATIAL_H\n\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n\n#include \"HostDeviceCommon/KernelOptions/ReSTIRGIOptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/ReSTIR/ReSTIRCommonSettings.h\"\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n\ntemplate <bool IsReSTIRGI>\nHIPRT_DEVICE void setup_adaptive_directional_spatial_reuse(HIPRTRenderData& render_data, unsigned int center_pixel_index, Xorshift32Generator& random_number_generator)\n{\n\tReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t// Generating a unique seed per pixel that will be used to generate the spatial neighbors of that pixel if Hammersley isn't used\n\tspatial_pass_settings.spatial_neighbors_rng_seed = random_number_generator.xorshift32();\n\n\tif (spatial_pass_settings.do_adaptive_directional_spatial_reuse(render_data.render_settings.accumulate))\n\t{\n\t\tspatial_pass_settings.reuse_radius = spatial_pass_settings.per_pixel_spatial_reuse_radius[center_pixel_index];\n\t\t// Storing the direction reuse mask in the 'current_pixel_directions_reuse_mask' field of the spatial\n\t\t// reuse settings so that we don't have to carry that parameter around in function calls everywhere...\n\t\t//\n\t\t// This parameter will be read by later by the function that samples a neighbor based on the allowed directions\n\t\tspatial_pass_settings.current_pixel_directions_reuse_mask = ReSTIRSettingsHelper::get_spatial_reuse_direction_mask_ull<IsReSTIRGI>(render_data, center_pixel_index);\n\n\t\tif (spatial_pass_settings.reuse_radius == 0)\n\t\t\tspatial_pass_settings.reuse_neighbor_count = 0;\n\t}\n}\n\ntemplate <bool 
IsReSTIRGI>\nHIPRT_DEVICE HIPRT_INLINE bool do_include_visibility_term_or_not(const HIPRTRenderData& render_data, int current_neighbor_index)\n{\n\tconst ReSTIRCommonSpatialPassSettings& spatial_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\tbool visibility_only_on_last_pass = spatial_settings.do_visibility_only_last_pass;\n\tbool is_last_pass = spatial_settings.spatial_pass_index == spatial_settings.number_of_passes - 1;\n\n\t// Only using the visibility term on the last pass if so desired\n\tbool include_target_function_visibility = visibility_only_on_last_pass && is_last_pass;\n\t// Also allowing visibility if we want it at every pass\n\tinclude_target_function_visibility |= !spatial_settings.do_visibility_only_last_pass;\n\n\t// Only doing visibility for a few neighbors depending on 'neighbor_visibility_count'\n\tinclude_target_function_visibility &= current_neighbor_index < spatial_settings.neighbor_visibility_count;\n\n\t// Only doing visibility if we want it at all\n\tinclude_target_function_visibility &= (IsReSTIRGI ? ReSTIR_GI_SpatialTargetFunctionVisibility : ReSTIR_DI_SpatialTargetFunctionVisibility);\n\n\t// We don't want visibility for the center pixel because we're going to reuse the\n\t// target function stored in the reservoir anyways\n\t// Note: the center pixel has index 'spatial_settings.reuse_neighbor_count'\n\t// while actual *neighbors* have index between [0, spatial_settings.reuse_neighbor_count - 1]\n\tinclude_target_function_visibility &= current_neighbor_index != spatial_settings.reuse_neighbor_count;\n\n\treturn include_target_function_visibility;\n}\n\n/**\n * Returns a pair of random numbers that should be used to sample the spatial neighbor disk of the current pixel\n * (i.e. 
pass the returned float2 to 'sample_in_disk_uv').\n * \n * This function samples UVs for sampling in a disk such that the point sampled is only sampled in the allowed\n * directions of a pixel (according to its direction reuse masks).\n * \n * Note that this function will sample the first sector if there are no sectors available around the given pixel\n */\nHIPRT_DEVICE float2 sample_spatial_neighbor_from_allowed_directions(const HIPRTRenderData& render_data, const ReSTIRCommonSpatialPassSettings& spatial_pass_settings, int2 center_pixel_coords, Xorshift32Generator& rng)\n{\n\tunsigned long long int directions_mask = spatial_pass_settings.current_pixel_directions_reuse_mask;\n\tint number_of_allowed_sectors = hippt::popc(directions_mask);\n\tunsigned char random_sector_index = rng.random_index(number_of_allowed_sectors);\n\n\t// Now that we have our random sector, we need to find what theta rotation corresponds\n\t// to that sector\n\t// \n\t// So we're counting how many sectors come before our 'random_sector_index' and we're going to\n\t// multiply that sector count by 2Pi / 32 (or / 64 if using 64 bits)\n\tunsigned char count_left_to_go = random_sector_index + 1;\n\n\t// Counting how many sectors there before we reach our 'random_sector_index'\n\tint sector_index = 0;\n\n\tunsigned char bit_count_so_far = 0;\n\tif (hippt::popc(directions_mask) == ReSTIR_GI_SpatialDirectionalReuseBitCount)\n\t\t// Fast path if all the directions are allowed\n\t\tsector_index = random_sector_index;\n\telse\n\t{\n\t\t// A naive implementation of this would go something like\n\t\t// \n\t\t// for (i = 0; i < ReSTIR_GI_SpatialDirectionalReuseBitCount; i++)\n\t\t// {\n\t\t//     if (directions_mask & (1ull << i))\n\t\t//     {\n\t\t//         --count_left_to_go;\n\t\t//\n\t\t//         if (count_left_to_go == 0)\n\t\t//             break;\n\t\t//     }\n\t\t// }\n\t\t// sector_index = i;\n\t\t// \n\t\t// i.e., counting the bits one by one until we counted the number of bits we 
needed\n\t\t// \n\t\t// \n\t\t// But here we're going to count the sectors 'count_left_to_go' by 'count_left_to_go' to get things\n\t\t// a bit faster.\n\t\t// \n\t\t// So if we have the directions mask:\n\t\t//\t- 01110000\n\t\t//\n\t\t// and we want the 4th valid sector, i.e. random_sector_index == 3, then we can just go ahead and\n\t\t// count bits 4 by 4:\n\t\t//\n\t\t// 11110000 <--- 'directions_mask'\n\t\t// &\n\t\t// 00001111 <--- 'mask'\n\t\t// =\n\t\t// 00000000. \n\t\t// --> popc(00000000) = 0 -----> 0 bits found\n\t\t//\n\t\t// We move the mask to the left by the number of bits we still have to find (which is still 4):\n\t\t// 11110000 <--- 'directions_mask'\n\t\t// &\n\t\t// 11110000 <--- 'mask'\n\t\t// =\n\t\t// 11110000. \n\t\t// --> popc(11110000) = 4 -----> 4 bits found --> we found all the bits we needed so the sector index\n\t\t// is in position '10000000' = 7 here\n\t\twhile (count_left_to_go > 0)\n\t\t{\n\t\t\tunsigned char mask_length = count_left_to_go;\n\t\t\tunsigned long long int mask = ((1ull << mask_length) - 1ull) << bit_count_so_far;\n\t\t\tint count_mask = hippt::popc(directions_mask & mask);\n\n\t\t\tcount_left_to_go -= count_mask;\n\t\t\tbit_count_so_far += mask_length;\n\t\t}\n\n\t\tsector_index = --bit_count_so_far;\n\t}\n\n\tfloat theta_start = sector_index / (float)ReSTIR_GI_SpatialDirectionalReuseBitCount;\n\t// Generating a random theta in between theta_start and the start of the next sector (which is 1.0f / 32.0f wide)\n\t// i.e. 
a random theta inside our disk sector\n\tfloat random_theta = theta_start + rng() * (1.0f / (float)ReSTIR_GI_SpatialDirectionalReuseBitCount);\n\n\treturn make_float2(random_theta, rng());\n}\n\n/**\n * Returns the linear index that can be used directly to index a buffer\n * of render_data of the 'neighbor_number'th neighbor that we're going\n * to spatially reuse from\n *\n * 'neighbor_number' is in [0, neighbor_reuse_count]\n * 'neighbor_reuse_count' is in [1, ReSTIRCommonSpatialPassSettings.reuse_neighbor_count]\n * 'neighbor_reuse_radius' is the radius of the disk within which the neighbors are sampled\n * 'center_pixel_coords' is the coordinates of the center pixel that is currently\n *\t\tdoing the resampling of its neighbors. Neighbors will be spatially sampled\n *\t\taround that position\n * 'res' is the resolution of the viewport. This is used to check whether the generated\n *\t\tneighbor location is outside of the viewport or not\n * 'rng' is a random generator used for generating spatial neighbor positions if not using a Hammersley\n *\t\tpoint set. 
\n * \n *\t\tOnly used if render_data.render_settings.restir_settings.common_spatial_pass.use_hammersley == false\n */\ntemplate <bool IsReSTIRGI>\nHIPRT_DEVICE HIPRT_INLINE int get_spatial_neighbor_pixel_index(const HIPRTRenderData& render_data, int neighbor_index, int2 center_pixel_coords, Xorshift32Generator& rng)\n{\n\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\n\tint neighbor_pixel_index;\n\tif (neighbor_index == spatial_pass_settings.reuse_neighbor_count)\n\t{\n\t\t// If this is the last neighbor, we set it to ourselves\n\t\t// This is why our loop on the neighbors goes up to 'i < NEIGHBOR_REUSE_COUNT + 1'\n\t\t// It's so that when i == NEIGHBOR_REUSE_COUNT, we resample ourselves\n\t\tneighbor_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t}\n\telse\n\t{\n\t\t// +1 and +1 here because we want to skip the first point as it is always (0, 0)\n\t\t// which means that we would be resampling ourselves (the center pixel) --> \n\t\t// pointless because we already resample ourselves \"manually\" (that's why there's that\n\t\t// \"if (neighbor_index == neighbor_reuse_count)\" above, to resample the center pixel)\n\t\tfloat2 uv;\n\t\tif (spatial_pass_settings.do_adaptive_directional_spatial_reuse(render_data.render_settings.accumulate))\n\t\t\tuv = sample_spatial_neighbor_from_allowed_directions(render_data, spatial_pass_settings, center_pixel_coords, rng);\n\t\telse\n\t\t\tuv = make_float2(rng(), rng());\n\n\t\tfloat2 neighbor_offset_in_disk = sample_in_disk_uv(spatial_pass_settings.reuse_radius, uv);\n\n\t\tint2 neighbor_offset_int = make_int2(static_cast<int>(roundf(neighbor_offset_in_disk.x)), static_cast<int>(roundf(neighbor_offset_in_disk.y)));\n\n\t\tint2 neighbor_pixel_coords;\n\t\tif (spatial_pass_settings.debug_neighbor_location)\n\t\t{\n\t\t\tint2 offset;\n\t\t\tif 
(spatial_pass_settings.debug_neighbor_location_direction == 0)\n\t\t\t\t// Horizontal\n\t\t\t\toffset = make_int2(spatial_pass_settings.reuse_radius, 0);\n\t\t\telse if (spatial_pass_settings.debug_neighbor_location_direction == 1)\n\t\t\t\t// Vertical\n\t\t\t\toffset = make_int2(0, spatial_pass_settings.reuse_radius);\n\t\t\telse\n\t\t\t\t// Diagonal\n\t\t\t\toffset = make_int2(spatial_pass_settings.reuse_radius, spatial_pass_settings.reuse_radius);\n\n\t\t\tneighbor_pixel_coords = center_pixel_coords + offset;\n\t\t}\n\t\telse\n\t\t\tneighbor_pixel_coords = center_pixel_coords + neighbor_offset_int;\n\n\t\tif (neighbor_pixel_coords.x < 0 || neighbor_pixel_coords.x >= render_data.render_settings.render_resolution.x ||\n\t\t\tneighbor_pixel_coords.y < 0 || neighbor_pixel_coords.y >= render_data.render_settings.render_resolution.y)\n\t\t\t// Rejecting the sample if it's outside of the viewport\n\t\t\treturn -1;\n\n\t\tneighbor_pixel_index = neighbor_pixel_coords.x + neighbor_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\t\tif (render_data.render_settings.enable_adaptive_sampling && render_data.render_settings.sample_number >= render_data.render_settings.adaptive_sampling_min_samples)\n\t\t{\n\t\t\t// If adaptive sampling is enabled, we only want to reuse a converged neighbor if the user allowed it\n\t\t\t// We also check whether or not we've reached the minimum amount of samples of adaptive sampling because\n\t\t\t// if adaptive sampling hasn't kicked in yet, there's no need to check whether the neighbor has converged or not yet\n\n\t\t\tif (spatial_pass_settings.allow_converged_neighbors_reuse)\n\t\t\t{\n\t\t\t\t// If we're allowing the reuse of converged neighbors, only doing so with a certain probability\n\n\t\t\t\tXorshift32Generator rng_converged_neighbor_reuse(render_data.random_number);\n\t\t\t\tif (rng_converged_neighbor_reuse() > spatial_pass_settings.converged_neighbor_reuse_probability)\n\t\t\t\t{\n\t\t\t\t\t// We didn't pass the 
probability check, we are not allowed to reuse the neighbor if it\n\t\t\t\t\t// has converged\n\n\t\t\t\t\tif (render_data.aux_buffers.pixel_converged_sample_count[neighbor_pixel_index] != -1)\n\t\t\t\t\t\t// The neighbor is indeed converged, returning invalid neighbor with -1\n\t\t\t\t\t\treturn -1;\n\t\t\t\t}\n\t\t\t}\n\t\t\telse if (render_data.aux_buffers.pixel_converged_sample_count[neighbor_pixel_index] != -1)\n\t\t\t\t// The user doesn't allow reusing converged neighbors and the neighbor is indeed converged\n\t\t\t\t// Returning -1 for invalid neighbor\n\t\t\t\treturn -1;\n\t\t}\n\t}\n\n\treturn neighbor_pixel_index;\n}\n\ntemplate <bool IsReSTIRGI>\nHIPRT_DEVICE void spatial_neighbor_advance_rng(const HIPRTRenderData& render_data, Xorshift32Generator& rng)\n{\n\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\n\tif (spatial_pass_settings.do_adaptive_directional_spatial_reuse(render_data.render_settings.accumulate))\n\t{\n\t\t// If not using Hammersley, then each point is generated with 3 random numbers\n\t\t// \n\t\t// One for the random sector in the disk\n\t\t// One for the random theta within that sector\n\t\t// One for the random radius\n\t\t//\n\t\t// See the 'sample_spatial_neighbor_from_allowed_directions' function\n\t\trng();\n\t\trng();\n\t\trng();\n\t}\n\telse\n\t{\n\t\t// Two random numbers for sampling a neighbor in the disk\n\t\trng();\n\t\trng();\n\t}\n}\n\n/**\n * Counts how many neighbors are eligible for reuse.\n * This is needed for proper normalization by pairwise MIS weights.\n *\n * A neighbor is not eligible if it is outside of the viewport or if\n * it doesn't satisfy the normal/plane/roughness heuristics\n *\n * 'out_valid_neighbor_M_sum' is the sum of the M values (confidences) of the\n * valid neighbors. 
Used by confidence-weights pairwise MIS weights\n *\n * The bits of 'out_neighbor_heuristics_cache' are 1 or 0 depending on whether or not\n * the corresponding neighbor was valid or not (can be reused later to avoid having to\n * re-evaluate the heuristics). Neighbor 0 is LSB.\n */\ntemplate <bool IsReSTIRGI>\nHIPRT_DEVICE HIPRT_INLINE void count_valid_spatial_neighbors(const HIPRTRenderData& render_data,\n\tconst ReSTIRSurface& center_pixel_surface,\n\tint2 center_pixel_coords,\n\tint& out_valid_neighbor_count, int& out_valid_neighbor_M_sum, int& out_neighbor_heuristics_cache)\n{\n\tout_valid_neighbor_count = 0;\n\n\tconst ReSTIRCommonSpatialPassSettings& spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\tXorshift32Generator spatial_neighbors_rng(spatial_pass_settings.spatial_neighbors_rng_seed);\n\n\tint center_pixel_index = center_pixel_coords.x + center_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\tint reused_neighbors_count = spatial_pass_settings.reuse_neighbor_count;\n\n\tfor (int neighbor_index = 0; neighbor_index < reused_neighbors_count; neighbor_index++)\n\t{\n\t\tunsigned long long int* spatial_reuse_hit_rate_hits = nullptr;\n\t\tunsigned long long int* spatial_reuse_hit_rate_total = nullptr;\n\n\t\tif (spatial_pass_settings.compute_spatial_reuse_hit_rate)\n\t\t\thippt::atomic_fetch_add(spatial_pass_settings.spatial_reuse_hit_rate_total, 1ull);\n\n\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<IsReSTIRGI>(render_data, neighbor_index, center_pixel_coords, spatial_neighbors_rng);\n\t\tif (neighbor_pixel_index == -1)\n\t\t\t// Neighbor out of the viewport\n\t\t\tcontinue;\n\n\t\tif (!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data,\n\t\t\tneighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<IsReSTIRGI>(render_data, center_pixel_surface)))\n\t\t\tcontinue;\n\n\t\tif 
(spatial_pass_settings.compute_spatial_reuse_hit_rate)\n\t\t\thippt::atomic_fetch_add(spatial_pass_settings.spatial_reuse_hit_rate_hits, 1ull);\n\n\t\tout_valid_neighbor_M_sum += ReSTIRSettingsHelper::get_restir_spatial_pass_input_reservoir_M<IsReSTIRGI>(render_data, neighbor_pixel_index);\n\t\tout_valid_neighbor_count++;\n\t\tout_neighbor_heuristics_cache |= (1 << neighbor_index);\n\t}\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/ReSTIR/UtilsTemporal.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_UTILS_TEMPORAL_H\n#define DEVICE_RESTIR_UTILS_TEMPORAL_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE int2 apply_permutation_sampling(int2 pixel_position, int random_bits)\n{\n\tint2 offset = make_int2(random_bits & 3, (random_bits >> 2) & 3);\n\tpixel_position += offset;\n\n\tpixel_position.x ^= 3;\n\tpixel_position.y ^= 3;\n\n\tpixel_position -= offset;\n\n\treturn pixel_position;\n}\n/**\n * Returns a triplet (x, y, z) with\n *\tx the linear index that can be used directly to index a buffer\n *\tof render_data for getting data of the temporal neighbor. x is -1\n *\tif there is no valid temporal neighbor (disoccluion / occlusion / out of viewport)\n *\n *\t(y, z) the pixel coordinates of the backprojected temporal neighbor position\n *\tThese two values will always be filled even if the temporal neighbor is invalid\n *\t(disoccluion / occlusion / out of viewport)\n */\ntemplate <bool IsReSTIRGI>\nHIPRT_HOST_DEVICE HIPRT_INLINE int3 find_temporal_neighbor_index(const HIPRTRenderData& render_data,\n\tconst float3& current_shading_point, const float3& current_normal, int center_pixel_index, Xorshift32Generator& random_number_generator)\n{\n\tif (render_data.render_settings.accumulate)\n\t\t// If accumulating, the camera isn't moving, just returning\n\t\t// the current pixel index\n\t\treturn make_int3(center_pixel_index, center_pixel_index % render_data.render_settings.render_resolution.x, center_pixel_index / render_data.render_settings.render_resolution.x);\n\n\tconst ReSTIRCommonTemporalPassSettings& temporal_pass_settings = ReSTIRSettingsHelper::get_restir_temporal_pass_settings<IsReSTIRGI>(render_data);\n\n\tfloat3 previous_screen_space_point_xyz = matrix_X_point(render_data.prev_camera.view_projection, current_shading_point);\n\tfloat2 
previous_screen_space_point = make_float2(previous_screen_space_point_xyz.x, previous_screen_space_point_xyz.y);\n\n\t// Bringing back in [0, 1] from [-1, 1]\n\tprevious_screen_space_point += make_float2(1.0f, 1.0f);\n\tprevious_screen_space_point *= make_float2(0.5f, 0.5f);\n\n\tint2 resolution = render_data.render_settings.render_resolution;\n\tfloat2 prev_pixel_float = make_float2(previous_screen_space_point.x * resolution.x, previous_screen_space_point.y * resolution.y);\n\t// Bringing back in the center of the pixel\n\tprev_pixel_float -= make_float2(0.5f, 0.5f);\n\n\t// We're going to randomly look for an acceptable neighbor around the back-projected pixel location to find\n\t// in a given radius\n\tint temporal_neighbor_index = -1;\n\tfor (int i = 0; i < temporal_pass_settings.max_neighbor_search_count + 1; i++)\n\t{\n\t\tfloat2 offset = make_float2(0.0f, 0.0f);\n\t\tif (i > 0)\n\t\t\t// Only randomly looking after we've at least checked whether or not the exact temporally reprojected location\n\t\t\t// is valid or not\n\t\t\toffset = make_float2(random_number_generator() - 0.5f, random_number_generator() - 0.5f) * temporal_pass_settings.neighbor_search_radius;\n\n\t\tint2 temporal_neighbor_screen_pixel_pos = make_int2(round(prev_pixel_float.x + offset.x), round(prev_pixel_float.y + offset.y));\n\t\tif (temporal_pass_settings.use_permutation_sampling && i == 0)\n\t\t\t// If we're looking at the direct temporal neighbor (without random offset), applying\n\t\t\t// permutation sampling if enabled\n\t\t\ttemporal_neighbor_screen_pixel_pos = apply_permutation_sampling(temporal_neighbor_screen_pixel_pos, temporal_pass_settings.permutation_sampling_random_bits);\n\n\t\tif (temporal_neighbor_screen_pixel_pos.x < 0 || temporal_neighbor_screen_pixel_pos.x >= resolution.x || temporal_neighbor_screen_pixel_pos.y < 0 || temporal_neighbor_screen_pixel_pos.y >= resolution.y)\n\t\t\t// Previous pixel is out of the current 
viewport\n\t\t\tcontinue;\n\n\t\ttemporal_neighbor_index = temporal_neighbor_screen_pixel_pos.x + temporal_neighbor_screen_pixel_pos.y * resolution.x;\n\n\t\t// We always want to read from the previous frame g-buffer for temporal neighbors\n\t\tbool use_previous_frame_g_buffer = true;\n\t\t// except if we're accumulating because then the camera is not moving --> no motion\n\t\t// --> temporal neighbor are on the same surface as the current -> the previous\n\t\t// g-buffer is the same as the current frame's --> no need to read from previous\n\t\t// frame g-buffer --> the previous frame G-buffer is deallocated to save VRAM\n\t\tuse_previous_frame_g_buffer &= render_data.render_settings.use_prev_frame_g_buffer();\n\t\tif (check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data, \n\t\t\ttemporal_neighbor_index, center_pixel_index, current_shading_point, current_normal, use_previous_frame_g_buffer))\n\t\t\t// We found a good neighbor\n\t\t\tbreak;\n\n\t\t// We didn't break so we didn't find a good neighbor\n\t\ttemporal_neighbor_index = -1;\n\t}\n\n\treturn make_int3(temporal_neighbor_index, static_cast<int>(round(prev_pixel_float.x)), static_cast<int>(round(prev_pixel_float.y)));\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/RussianRoulette.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RUSSIAN_ROULETTE_H\n#define RUSSIAN_ROULETTE_H\n\n#include \"HostDeviceCommon/RenderSettings.h\"\n\n/**\n * Returns false if the ray should be killed.\n * \n * This overload returns in 'throughput_scaling' the throughput scaling that\n * has been applied to 'ray_throughput' to account for the russian roulette surviving\n * probability\n * \n * If russian roulette wasn't applied, 'throughput_scaling' is left untouched\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool do_russian_roulette(const HIPRTRenderSettings& render_settings, int bounce, ColorRGB32F& ray_throughput, float& throughput_scaling, const ColorRGB32F& current_weight, Xorshift32Generator& random_number_generator)\n{\n    if (bounce >= render_settings.russian_roulette_min_depth && render_settings.do_russian_roulette)\n    {\n        float survive_probability = 0.0f;\n        if (render_settings.path_russian_roulette_method == PathRussianRoulette::MAX_THROUGHPUT)\n            // Easy max throughput threshold\n            survive_probability = ray_throughput.max_component();\n        else if (render_settings.path_russian_roulette_method == PathRussianRoulette::ARNOLD_2014)\n        {\n            // Reference:\n            // [Physically Based Shader Design in Arnold, Langlands, 2014]\n            survive_probability = (ray_throughput * current_weight).max_component() / ray_throughput.max_component();\n            survive_probability = sqrtf(survive_probability);\n        }\n\n        // Clamping anything above one back to 1\n        survive_probability = hippt::min(survive_probability, 1.0f);\n\n        if (random_number_generator() > survive_probability)\n            // Kill the ray\n            return false;\n\n        throughput_scaling = 1.0f / survive_probability;\n        if (render_settings.russian_roulette_throughput_clamp > 0.0f)\n            // 
Clamping the throughput increase to avoid fireflies by\n            // rays that still pass the russian roulette with very low\n            // probabilities\n            throughput_scaling = hippt::min(throughput_scaling, render_settings.russian_roulette_throughput_clamp);\n\n        ray_throughput *= throughput_scaling;\n    }\n\n    // The ray survived\n    return true;\n}\n\n/**\n * Returns false if the ray should be killed.\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool do_russian_roulette(const HIPRTRenderSettings& render_settings, int bounce, ColorRGB32F& ray_throughput, const ColorRGB32F& current_weight, Xorshift32Generator& random_number_generator)\n{\n    float unused_throughput_scaling;\n    return do_russian_roulette(render_settings, bounce, ray_throughput, unused_throughput_scaling, current_weight, random_number_generator);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Sampling.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_SAMPLING_H\n#define DEVICE_SAMPLING_H\n\n#include \"Device/includes/Fresnel.h\"\n#include \"Device/includes/ONB.h\"\n#include \"Device/includes/Texture.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\n/**\n * Returns the radical inverse base 2 of a given number.\n * Used for generating 2D points following the Hammersley point set\n * \n * Reference: [Holger Dammertz, Hammersley Points on the Hemisphere] http://holger.dammertz.org/stuff/notes_HammersleyOnHemisphere.html\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float radical_inverse_base_2(unsigned int index) {\n    index = (index << 16u) | (index >> 16u);\n    index = ((index & 0x55555555u) << 1u) | ((index & 0xAAAAAAAAu) >> 1u);\n    index = ((index & 0x33333333u) << 2u) | ((index & 0xCCCCCCCCu) >> 2u);\n    index = ((index & 0x0F0F0F0Fu) << 4u) | ((index & 0xF0F0F0F0u) >> 4u);\n    index = ((index & 0x00FF00FFu) << 8u) | ((index & 0xFF00FF00u) >> 8u);\n    return float(index) * 2.3283064365386963e-10f; // / 0x100000000\n}\n\n/**\n * Generates a 2D point of the Hammersley point set given the total number\n * of points that are going to be sampled and the index of the point\n * (in [0, number_of_points -1]) that we're sampling right now\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float2 sample_hammersley_2D(unsigned int number_of_points, unsigned int point_index)\n{\n    return make_float2(static_cast<float>(point_index) / static_cast<float>(number_of_points), radical_inverse_base_2(point_index));\n}\n\n/**\n * Returns float pixel coordinates offset from the center of the disk\n * given the radius of the disk and two random numbers in [0, 1] u and v\n * \n * uv.x is used as theta for sampling the disk\n * uv.y is used for 
sampling the distance from the center of the disk\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float2 sample_in_disk_uv(float radius, float2 uv)\n{\n    float r_sqrt_v = radius * sqrtf(uv.y);\n    float x = r_sqrt_v * cos(M_TWO_PI * uv.x);\n    float y = r_sqrt_v * sin(M_TWO_PI * uv.x);\n\n    return make_float2(x, y);\n}\n\n/**\n * Returns integer pixel coordinates offset from the center of the disk of radius 'radius'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float2 sample_in_disk(float radius, Xorshift32Generator& random_number_generator)\n{\n    float u1 = random_number_generator();\n    float u2 = random_number_generator();\n\n    return sample_in_disk_uv(radius, make_float2(u1, u2));\n}\n\n/**\n * Power heuristic with a hardcoded Beta exponent of 2 and two sampling strategies only\n *\n * This implementation already contains the 1/nb_pdf_a fraction of the MIS estimator. This means\n * that you should not divide by 1/nb_pdf_a in the evaluation of your function where you use\n * the MIS weight\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float power_heuristic(float pdf_a, int nb_pdf_a, float pdf_b, int nb_pdf_b)\n{\n    float p_a_sqr = (nb_pdf_a * pdf_a) * (nb_pdf_a * pdf_a);\n    float p_b_sqr = (nb_pdf_b * pdf_b) * (nb_pdf_b * pdf_b);\n\n    // Note that we should have a multiplication by nb_pdf_a^2 in the\n    // numerator but because we're going to divide by nb_pdf_a in the\n    // function evaluation that use this MIS weight according to the\n    // MIS estimator, we're only multiplying by nb_pdf_a (not squared)\n    // since the squared nb_pdf_a would be cancelled by the division by\n    // nb_pdf_a\n    return nb_pdf_a * pdf_a * pdf_a / (p_a_sqr + p_b_sqr);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float power_heuristic(float pdf_a, float pdf_b)\n{\n    return power_heuristic(pdf_a, 1, pdf_b, 1);\n}\n\n/**\n * Balance heuristic for MIS weights computation\n *\n * This implementation already contains the 1/nb_pdf_a fraction of the MIS estimator. 
This means\n * that you should not divide by 1/nb_pdf_a in the evaluation of your function where you use\n * the MIS weight\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float balance_heuristic(float pdf_a, float nb_pdf_a, float pdf_b, float nb_pdf_b)\n{\n    if (pdf_a == 0.0f)\n        return 0.0f;\n\n    // Note that we should have a multiplication by nb_pdf_a in the\n    // numerator but because we're going to divide by nb_pdf_a in the\n    // function evaluation that use this MIS weight according to the\n    // MIS estimator, this multiplication in the numerator that we\n    // would have here would be canceled and that would be basically\n    // wasted maths so we're not doing it and we should not do it\n    // in the function evaluation either.\n    return pdf_a / (nb_pdf_a * pdf_a + nb_pdf_b * pdf_b);\n}\n\n/**\n * Balance heuristic for 3 strategies\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float balance_heuristic(float pdf_a, int nb_pdf_a, float pdf_b, int nb_pdf_b, int pdf_c, int nb_pdf_c)\n{\n    // Note that we should have a multiplication by nb_pdf_a in the\n    // numerator but because we're going to divide by nb_pdf_a in the\n    // function evaluation that use this MIS weight according to the\n    // MIS estimator, this multiplication in the numerator that we\n    // would have here would be canceled and that would be basically\n    // wasted maths so we're not doing it and we should not do it\n    // in the function evaluation either.\n    return pdf_a / (nb_pdf_a * pdf_a + nb_pdf_b * pdf_b + nb_pdf_c * pdf_c);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float balance_heuristic(float pdf_a, float pdf_b)\n{\n    return balance_heuristic(pdf_a, 1, pdf_b, 1);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float balance_heuristic(float pdf_a, float pdf_b, float pdf_c)\n{\n    return balance_heuristic(pdf_a, 1, pdf_b, 1, pdf_c, 1);\n}\n\n/**\n * Reflects a ray about a normal. 
This function requires that dot(ray_direction, surface_normal) > 0 i.e.\n * ray_direction and surface_normal are in the same hemisphere\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 reflect_ray(const float3& ray_direction, const float3& surface_normal)\n{\n    return 2.0f * hippt::dot(ray_direction, surface_normal) * surface_normal - ray_direction;\n}\n\n/**\n * Refracts a ray about a normal. This function requires that dot(ray_direction, surface_normal) > 0 i.e.\n * ray_direction and surface_normal are in the same hemisphere\n * \n * relative_eta here must be eta_t / eta_i\n * \n * No total internal reflection is assumed\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 refract_ray(const float3& ray_direction, const float3& surface_normal, float relative_eta)\n{\n    float NoI = hippt::dot(ray_direction, surface_normal);\n\n    float sin_theta_i_2 = 1.0f - NoI * NoI;\n    float root_term = 1.0f - sin_theta_i_2 / (relative_eta * relative_eta);\n\n    float cos_theta_t = sqrt(root_term);\n    float3 refract_direction = -ray_direction / relative_eta + (NoI / relative_eta - cos_theta_t) * surface_normal;\n\n    return refract_direction;\n}\n\n/** \n * Reference:\n * \n * [1] [Lambertian Reflection Without Tangents], Edd Biddulph https://fizzer.neocities.org/lambertnotangent\n * \n * The sampled direction is returned in world space\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 cosine_weighted_sample_around_normal_world_space(const float3& normal, Xorshift32Generator& random_number_generator)\n{\n    float rand_1 = random_number_generator();\n    float rand_2 = 2.0f * random_number_generator() - 1.0f;\n    if (rand_1 < 1.0e-8f && rand_2 < -0.999999f && normal.z > 0.999999f)\n    {\n        // Slight perturbation when this would result in a singularity:\n        // When rand_1 is 0.0f and rand_2 is -1.0f, this results in a theta\n        // of 0.0f which then gives sphere_point = {0.0f, 0.0f, -1.0f}. 
In\n        // conjunction with a normal of {0.0f, 0.0f, 1.0f}, we get a null vector\n        // at the return statement that is then normalized --> NaN\n        rand_1 += 1.0e-7f;\n        rand_2 += 1.0e-7f;\n    }\n\n    float theta = M_TWO_PI * rand_1;\n\n    float2 xy = sqrt(1.0f - rand_2 * rand_2) * make_float2(cos(theta), sin(theta));\n    float3 sphere_point = make_float3(xy.x, xy.y, rand_2);\n\n    return hippt::normalize(normal + sphere_point);\n}\n\n/**\n * Reference:\n *\n * [1] [Global Illumination Compendium], https://people.cs.kuleuven.be/~philip.dutre/GI/TotalCompendium.pdf\n *\n * The sampled direction is returned in a local frame with Z as the up axis\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 cosine_weighted_sample_z_up_frame(Xorshift32Generator& random_number_generator)\n{\n    float r1 = random_number_generator();\n    float r2 = random_number_generator();\n\n    float phi = M_TWO_PI * r1;\n    float cos_theta = sqrt(r2);\n    float sin_theta = sqrt(1 - cos_theta * cos_theta);\n\n    return hippt::normalize(make_float3(cos(phi) * sin_theta, sin(phi) * sin_theta, cos_theta));\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/SanityCheck.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_SANITY_CHECK_H\n#define DEVICE_INCLUDES_SANITY_CHECK_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n#ifndef __KERNELCC__\n#include \"Utils/Utils.h\" // For debugbreak in sanity_check()\n\n // For logging stuff on the CPU and avoid everything being mixed\n // up in the terminal because of multithreading\n#include <mutex>\nstd::mutex g_mutex;\n#endif\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void debug_set_final_color(const HIPRTRenderData& render_data, int x, int y, ColorRGB32F final_color)\n{\n    if (render_data.render_settings.sample_number == 0)\n        render_data.buffers.accumulated_ray_colors[y * render_data.render_settings.render_resolution.x + x] = final_color;\n    else\n        render_data.buffers.accumulated_ray_colors[y * render_data.render_settings.render_resolution.x + x] = final_color * render_data.render_settings.sample_number;\n}\n\n/**\n * Returns true if the color has a negative component.\n * False otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE bool check_for_negative_color(ColorRGB32F ray_color, int x, int y, int sample)\n{\n    // To remove 'unused variable' warnings of the GPU compiler because these variables are only used\n    // in the std::cout of the CPU\n    (void)x;\n    (void)y;\n    (void)sample;\n\n    if (ray_color.r < 0 || ray_color.g < 0 || ray_color.b < 0)\n    {\n#ifndef __KERNELCC__\n        std::cout << \"Negative color at [\" << x << \", \" << y << \"], sample \" << sample << std::endl;\n#endif\n\n        return true;\n    }\n\n    return false;\n}\n\n/**\n * Returns true if the color has a NaN or INF component.\n * False otherwise\n */ \nHIPRT_HOST_DEVICE HIPRT_INLINE bool check_for_nan(ColorRGB32F ray_color, int x, int y, int sample)\n{\n    // To avoid unused variables on the GPU\n    (void)x;\n    (void)y;\n    
(void)sample;\n\n    if (hippt::is_nan(ray_color.r) || hippt::is_nan(ray_color.g) || hippt::is_nan(ray_color.b) ||\n        hippt::is_inf(ray_color.r) || hippt::is_inf(ray_color.g) || hippt::is_inf(ray_color.b))\n    {\n#ifndef __KERNELCC__\n        std::lock_guard<std::mutex> logging_lock(g_mutex);\n        std::cout << \"NaN/INF at [\" << x << \", \" << y << \"], sample\" << sample << std::endl;\n#endif\n        return true;\n    }\n\n    return false;\n}\n\ntemplate <bool CheckOnlyOnCPU = false>\nHIPRT_HOST_DEVICE HIPRT_INLINE bool sanity_check(const HIPRTRenderData& render_data, ColorRGB32F& in_out_color, int x, int y)\n{\n    if constexpr (CheckOnlyOnCPU)\n    {\n#ifdef __KERNELCC__\n        return true;\n#endif\n    }\n\n    bool valid = true;\n\n    valid &= !check_for_negative_color(in_out_color, x, y, render_data.render_settings.sample_number);\n    valid &= !check_for_nan(in_out_color, x, y, render_data.render_settings.sample_number);\n\n    if (!valid)\n    {\n#ifndef __KERNELCC__\n        Utils::debugbreak();\n#endif\n\n        if (render_data.render_settings.display_NaNs)\n            debug_set_final_color(render_data, x, y, ColorRGB32F(1.0e30f, 0.0f, 1.0e30f));\n        else\n            in_out_color = ColorRGB32F(0.0f);\n    }\n\n    return valid;\n}\n\ntemplate <bool CheckOnlyOnCPU = false>\nHIPRT_HOST_DEVICE HIPRT_INLINE bool sanity_check(const HIPRTRenderData& render_data, const ColorRGB32F& in_out_color, int x, int y)\n{\n    ColorRGB32F copy = in_out_color;\n    return sanity_check(render_data, copy, x, y);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/Texture.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_TEXTURE_H\n#define DEVICE_TEXTURE_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/TriangleStructures.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n#ifndef __KERNELCC__\n#include \"Image/Image.h\"\n#endif\n\n#ifdef __KERNELCC__\n// Dummy usings so that the GPU compiler doesn't complain that Image8Bit / Image32Bit don't exist.\n// It's okay to dummy use them as int because they are not used on the GPU side anyway, this is\n// purely for the compiler to be happy\nusing Image8Bit = int;\nusing Image32Bit = int;\n#endif\n\n/**\n * Templated here so that the CPU can cast the texture_buffer into Image8Bit or Image32Bit\n * for proper sampling in unsigned char or float respectively.\n * This template argument isn't used on the GPU and that's why Image8Bit and Image32Bit\n * are being defined as 'ints'\n * \n * If 'flip_uv_y' is true, then UV (0, 0) is the bottom left corner of the texture\n * and the texture must use a wrapping address mode for the V coordinate.\n * \n * If 'flip_uv_y' is true, then the UV coordinates are just used as is\n */ \ntemplate <typename ImageType = Image8Bit>\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F sample_texture_rgba(const void* texture_buffer, int texture_index, bool is_srgb, float2 uv, bool flip_uv_y = true)\n{\n    ColorRGBA32F rgba;\n\n#ifdef __KERNELCC__\n    // We're doing the UV addressing ourselves since it seems to be broken in Orochi...\n    float u = uv.x;\n    float v = uv.y;\n    if (flip_uv_y)\n        v = -v;\n\n    if (reinterpret_cast<const oroTextureObject_t*>(texture_buffer)[texture_index] == 0)\n        return ColorRGBA32F(0.0f);\n\n    rgba = ColorRGBA32F(tex2D<float4>(reinterpret_cast<const oroTextureObject_t*>(texture_buffer)[texture_index], u, v));\n#else\n    const ImageType& 
texture = reinterpret_cast<const ImageType*>(texture_buffer)[texture_index];\n\n    rgba = texture.sample_rgba32f(uv);\n#endif\n\n    // sRGB to linear conversion\n    // Doing the conversion manually instead of using the hardware\n    // because it's unavailable in Orochi (again) :(\n    if (is_srgb)\n        return intrin_pow(rgba, 2.2f);\n    else\n        return rgba;\n}\n\n/**\n * If 'flip_uv_y' is true, then UV(0, 0) is the bottom left corner of the texture\n * and the texture must use a wrapping address mode for the V coordinate.\n *\n * If 'flip_uv_y' is true, then the UV coordinates are just used as is\n * \n * 'flip_uv_y' should basically be set to true in most cases and.\n * It should be set to false if your texture addressing mode isn't 'warping'\n * or when you know what you're doing and why you need to have it to false\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_texture_rgb_8bits(const void* texture_buffer, int texture_index, bool is_srgb, float2 uv, bool flip_uv_y = true)\n{\n    ColorRGBA32F rgba = sample_texture_rgba<Image8Bit>(texture_buffer, texture_index, is_srgb, uv, flip_uv_y);\n\n    return ColorRGB32F(rgba.r, rgba.g, rgba.b);\n}\n\n/**\n * Samples a texture given by indexing the texture array 'texture_buffer' with 'texture_buffer[texture_index]'.\n * \n * To read from a single texture, pass the pointer to the texture in 'texture_buffer' and\n * pass texture_index = 0\n * \n * Not that on the GPU, 'texture_buffer' must be of type oroTextureObject_t*, i.e. 
it's a pointer on oroTextureObject_t\n * this means that if the pointer is set in RenderData with OrochiTexture::get_device_texture() on the CPU, then\n * &get_device_texture() must be passed to this function for 'texture_buffer'\n * \n * If 'flip_uv_y' is true, then UV(0, 0) is the bottom left corner of the texture\n * and the texture must use a wrapping address mode for the V coordinate.\n *\n * If 'flip_uv_y' is true, then the UV coordinates are just used as is\n * \n * 'flip_uv_y' should basically be set to true in most cases and.\n * It should be set to false if your texture addressing mode isn't 'warping'\n * or when you know what you're doing and why you need to have it to false\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_texture_rgb_32bits(const void* texture_buffer, int texture_index, bool is_srgb, float2 uv, bool flip_uv_y = true)\n{\n    ColorRGBA32F rgba = sample_texture_rgba<Image32Bit>(texture_buffer, texture_index, is_srgb, uv, flip_uv_y);\n\n    return ColorRGB32F(rgba.r, rgba.g, rgba.b);\n}\n\n#ifdef __KERNELCC__\n/**\n * Bilinearly samples around x & y on the layer z of a 3D texture configured for\n * nearest neighbor sampling\n * \n * uv is supposed to be in [0, 1] already\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F internal_bilinear_sample_on_3D_texture(const oroTextureObject_t texture, int3 ires, float2 uv, int z)\n{\n    // Reference: https://iquilezles.org/articles/hwinterpolation/\n\n    float2 res_f = make_float2(ires.x, ires.y);\n\n    float2 st = (uv - 0.5f / res_f) * res_f;\n    int2 i = make_int2(floorf(st.x), floorf(st.y));\n    float2 w = make_float2(hippt::fract(st.x), hippt::fract(st.y));\n\n    ColorRGBA32F a = ColorRGBA32F(tex3D<float4>(texture, i.x + 0, i.y + 0, z));\n    ColorRGBA32F b = ColorRGBA32F(tex3D<float4>(texture, i.x + 1, i.y + 0, z));\n    ColorRGBA32F c = ColorRGBA32F(tex3D<float4>(texture, i.x + 0, i.y + 1, z));\n    ColorRGBA32F d = ColorRGBA32F(tex3D<float4>(texture, i.x + 1, i.y + 1, z));\n\n    
return hippt::lerp(hippt::lerp(a, b, w.x), hippt::lerp(c, d, w.x), w.y);\n}\n#endif\n\n/**\n * This function samples a 3D texture given in the 'texture' parameter\n * This parameter should be an oroTextureObject_t on the GPU, not a\n * pointer 'oroTextureObject_t*' as is the case for 'sample_texture_rgb_32bits'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_texture_3D_rgb_32bits(void* texture, int3 texture_dims, float3 uvw, bool hardware_interpolation = false)\n{\n    if (texture == nullptr)\n        return ColorRGB32F(0.0f);\n\n#ifdef __KERNELCC__\n    // Sampling in repeat mode so we're just keeping the fractional part\n    float u = uvw.x;\n    if (u != 1.0f)\n        // Only doing that if u != 1.0f because if we actually have\n        // uv.x == 1.0f, then subtracting static_cast<int>(uv.x) will\n        // give us 0.0f even though we actually want 1.0f (which is correct).\n        // \n        // Basically, 1.0f gets transformed into 0.0f even though 1.0f is a correct\n        // U coordinate which needs not to be wrapped\n        u = hippt::fract(uvw.x);\n\n    float v = uvw.y;\n    if (v != 1.0f)\n        // Same for v\n        v = hippt::fract(uvw.y);\n\n    float w = uvw.z;\n    if (w != 1.0f)\n        // Same for w\n        w = hippt::fract(uvw.z);\n\n    // For negative UVs, we also want to repeat and we want, for example, \n    // -0.1f to behave as 0.9f\n    u = u < 0 ? 1.0f + u : u;\n    v = v < 0 ? 1.0f + v : v;\n    w = w < 0 ? 
1.0f + w : w;\n\n    // Sampling with [0, 0] bottom-left convention\n    v = 1.0f - v;\n\n    if (hardware_interpolation)\n    {\n        float x = (u * (texture_dims.x - 1));\n        float y = (v * (texture_dims.y - 1));\n        float z = (w * (texture_dims.z - 1));\n\n        return ColorRGB32F(ColorRGBA32F(tex3D<float4>(reinterpret_cast<oroTextureObject_t>(texture), x, y, z)));\n    }\n    else\n    {\n        float z = (w * (texture_dims.z - 1));\n\n        // Whether or not we need to interpolate with layer z+1 or z-1\n        bool z_layer_up = hippt::fract(w * texture_dims.z) > 0.5f;\n        int z0 = z;\n        int z1 = z_layer_up ? z0 + 1 : z0 - 1;\n\n        ColorRGBA32F rgba0 = internal_bilinear_sample_on_3D_texture(reinterpret_cast<oroTextureObject_t>(texture), texture_dims, make_float2(u, v), z0);\n        ColorRGBA32F rgba1 = internal_bilinear_sample_on_3D_texture(reinterpret_cast<oroTextureObject_t>(texture), texture_dims, make_float2(u, v), z1);\n\n        return ColorRGB32F(hippt::lerp(rgba0, rgba1, w));\n    }\n#else\n    const Image32Bit3D& image = *reinterpret_cast<const Image32Bit3D*>(texture);\n    ColorRGBA32F rgba = image.sample_rgba32f(uvw);\n\n    return ColorRGB32F(rgba);\n#endif\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sample_environment_map_texture(const WorldSettings& world_settings, float2 uv)\n{\n#if EnvmapSamplingDoBilinearFiltering == KERNEL_OPTION_TRUE\n    float x = uv.x * (world_settings.envmap_width - 1);\n    float y = uv.y * (world_settings.envmap_height - 1);\n\n    float x_frac = hippt::fract(x);\n    float y_frac = hippt::fract(y);\n\n    int x_i = (int)x;\n    int x_i_1 = hippt::min(x_i + 1, (int)world_settings.envmap_width - 1);\n    int y_i = (int)y;\n    int y_i_1 = hippt::min(y_i + 1, (int)world_settings.envmap_height - 1);\n\n    int index_x0y0 = x_i + y_i * world_settings.envmap_width;\n    int index_x1y0 = x_i_1 + y_i * world_settings.envmap_width;\n    int index_x0y1 = x_i + y_i_1 * 
world_settings.envmap_width;\n    int index_x1y1 = x_i_1 + y_i_1 * world_settings.envmap_width;\n\n    ColorRGB32F color_x0y0 = world_settings.envmap[index_x0y0].unpack() * world_settings.envmap_intensity;\n    ColorRGB32F color_x1y0 = world_settings.envmap[index_x1y0].unpack() * world_settings.envmap_intensity;\n    ColorRGB32F color_x0y1 = world_settings.envmap[index_x0y1].unpack() * world_settings.envmap_intensity;\n    ColorRGB32F color_x1y1 = world_settings.envmap[index_x1y1].unpack() * world_settings.envmap_intensity;\n\n    return hippt::lerp(hippt::lerp(color_x0y0, color_x1y0, x_frac), hippt::lerp(color_x0y1, color_x1y1, x_frac), y_frac);\n#else\n    int x = uv.x * (world_settings.envmap_width - 1);\n    int y = uv.y * (world_settings.envmap_height - 1);\n    int index = x + y * world_settings.envmap_width;\n\n    return world_settings.envmap[index].unpack() * world_settings.envmap_intensity;\n#endif\n}\n\n/**\n * Given the indices of the vertices of a triangle, interpolates the vertices data found\n * in the 'data' buffer passed as argument as the given UV coordinates\n */\ntemplate <typename T>\nHIPRT_HOST_DEVICE HIPRT_INLINE T uv_interpolate(int vertex_A_index, int vertex_B_index, int vertex_C_index, T* data, float2 uv)\n{\n    return data[vertex_B_index] * uv.x + data[vertex_C_index] * uv.y + data[vertex_A_index] * (1.0f - uv.x - uv.y);\n}\n\n/**\n * Given the indices of the vertices of a triangle, interpolates the vertices data found\n * in the 'data' buffer passed as argument as the given UV coordinates\n */\ntemplate <typename T>\nHIPRT_HOST_DEVICE HIPRT_INLINE T uv_interpolate(TriangleIndices triangle_vertex_indices, T* data, float2 uv)\n{\n    return uv_interpolate(triangle_vertex_indices.x, triangle_vertex_indices.y, triangle_vertex_indices.z, data, uv);\n}\n\n/**\n * Just a simple \"specialization\" for when we're interpolating texcoords\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float2 uv_interpolate(TriangleTexcoords texcoords, float2 uv)\n{\n    
return texcoords.y * uv.x + texcoords.z * uv.y + texcoords.x * (1.0f - uv.x - uv.y);\n}\n\n/**\n * Same as the overloads above but you can call this one when you don't already have the vertex indices\n * from the place you're calling this function from. This function will then fetch the vertex indices again\n */\ntemplate <typename T>\nHIPRT_HOST_DEVICE HIPRT_INLINE T uv_interpolate(int* vertex_indices, int primitive_index, T* data, float2 uv)\n{\n    int vertex_A_index = vertex_indices[primitive_index * 3 + 0];\n    int vertex_B_index = vertex_indices[primitive_index * 3 + 1];\n    int vertex_C_index = vertex_indices[primitive_index * 3 + 2];\n\n    return uv_interpolate(vertex_A_index, vertex_B_index, vertex_C_index, data, uv);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/TriangleStructures.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_TRIANGLE_STRUCTURES_H\n#define DEVICE_TRIANGLE_STRUCTURES_H\n\n#include \"HostDeviceCommon/Math.h\"\n\n/**\n * Structure that contains the vertex index (in the vertex buffer) of the 3 vertices of a triangle\n */\nstruct TriangleIndices\n{\n    int x; // vertex A\n    int y; // vertex B\n    int z; // vertex C\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE TriangleIndices load_triangle_vertex_indices(int* triangle_indices_buffer, int primitive_index)\n{\n    int primitive_index_3 = primitive_index * 3;\n\n    return TriangleIndices\n    {\n        triangle_indices_buffer[primitive_index_3 + 0],\n        triangle_indices_buffer[primitive_index_3 + 1],\n        triangle_indices_buffer[primitive_index_3 + 2]\n    };\n}\n\n/**\n * Structure that contains the UV texcoords of the 3 vertices of a triangle\n */\nstruct TriangleTexcoords\n{\n    float2 x; // vertex A\n    float2 y; // vertex B\n    float2 z; // vertex C\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE TriangleTexcoords load_triangle_texcoords(float2* texcoords_buffer, TriangleIndices triangle_vertex_indices)\n{\n    return TriangleTexcoords\n    {\n        texcoords_buffer[triangle_vertex_indices.x],\n        texcoords_buffer[triangle_vertex_indices.y],\n        texcoords_buffer[triangle_vertex_indices.z]\n    };\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/includes/WarpDirectionReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_INCLUDES_WARP_DIRECTION_REUSE_H\n#define DEVICE_INCLUDES_WARP_DIRECTION_REUSE_H\n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n\n#include \"HostDeviceCommon/HitInfo.h\"\n\n/**\n * Experimental implementation of [Generate Coherent Rays Directly, Liu et al., 2024]\n * \n * This incomplete implementation supposes that all threads in the warp have the same material\n * type and this does not implement the \"interleaved groups\" approach to reduce correlation\n * \n * Preliminary results only show a 10% boost in perf, even without correlation reduction and on\n * the Bistro (which is an expensive scene to trace). Because the correlations were pretty bad,\n * the implementation of the paper was discontinued\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void warp_direction_reuse(const HIPRTRenderData& render_data, const HitInfo& closest_hit_info, RayPayload& ray_payload, float3 view_direction, float3& in_out_bounce_direction, ColorRGB32F& out_bsdf_color, float& out_bsdf_pdf, int bounce, Xorshift32Generator& random_number_generator)\n{\n    if (bounce == 0)\n    {\n        // Direction reuse is only done on the first bounce because the efficiency largely decreases at later bounces\n        \n        unsigned int active_mask = hippt::warp_activemask();\n        unsigned int first_active_thread_index = hippt::ffs(active_mask) - 1;\n\n        float3 local_direction = world_to_local_frame(closest_hit_info.shading_normal, in_out_bounce_direction);\n\n        local_direction.x = hippt::warp_shfl(local_direction.x, first_active_thread_index);\n        local_direction.y = hippt::warp_shfl(local_direction.y, first_active_thread_index);\n        local_direction.z = hippt::warp_shfl(local_direction.z, first_active_thread_index);\n\n        in_out_bounce_direction = 
local_to_world_frame(closest_hit_info.shading_normal, local_direction);\n\n        BSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n        BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, in_out_bounce_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness);\n        out_bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, out_bsdf_pdf, random_number_generator);\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernel_parameters/NEE++/NEEPlusPlusCachingPrepassParameters.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef NEE_PLUS_PLUS_CACHING_PREPASS_KERNEL_PARAMETERS_H\n#define NEE_PLUS_PLUS_CACHING_PREPASS_KERNEL_PARAMETERS_H\n\n#include \"Device/includes/NEE++/NEE++.h\"\n\nstruct NEEPlusPlusCachingPrepassParameters\n{\n\tNEEPlusPlusDevice nee_plus_plus;\n\n\tHIPRTCamera current_camera;\n\n\tunsigned int random_number = 42;\n};\n\n#endif"
  },
  {
    "path": "src/Device/kernel_parameters/ReSTIR/DI/LightPresamplingParameters.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef LIGHT_PRESAMPLING_KERNEL_PARAMETERS_H\n#define LIGHT_PRESAMPLING_KERNEL_PARAMETERS_H\n\n#include \"Device/includes/ReSTIR/DI/PresampledLight.h\"\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n\n#include \"HostDeviceCommon/Material/MaterialPackedSoA.h\"\n#include \"HostDeviceCommon/WorldSettings.h\"\n\nstruct LightPresamplingParameters\n{\n\t/**\n\t * Parameters specific to the kernel\n\t */\n\n\t// From all the lights of the scene, how many subsets to presample\n\tint number_of_subsets = 128;\n\t// How many lights to presample in each subset\n\tint subset_size = 1024;\n\t// Buffer that holds the presampled lights\n\tReSTIRDIPresampledLight* out_light_samples;\n\n\t// For each presampled light, the probability that this is going to be an envmap sample\n\tfloat envmap_sampling_probability;\n};\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/Baking/GGXConductorDirectionalAlbedo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/Baker/GGXConductorDirectionalAlbedoSettings.h\"\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/BSDFs/Microfacet.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n /* References:\n * [1][Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n * [2][Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n * [3][Dassault Enterprise PBR 2025 Specification]\n * [4][Google - Physically Based Rendering in Filament]\n * [5][MaterialX codebase on Github]\n * [6][Blender's Cycles codebase on Github]\n *\n * This kernel computes the directional albedo of a conductor BRDF for use\n * in energy compensation code (MicrofacetEnergyCompensation.h) as proposed in\n * [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n *\n * The kernel outputs its results in one buffer (which is then written to disk as a texture).\n * The texture is parameterized by cos_theta_o (cosine view direction) and the roughness of\n * the BRDF\n */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXConductorDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXConductorDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXConductorDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXConductorDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\n    const uint32_t pixel_index = (x + y * bake_settings.texture_size_cos_theta);\n\n    if (x >= bake_settings.texture_size_cos_theta || y >= 
bake_settings.texture_size_roughness)\n        return;\n\n    Xorshift32Generator random_number_generator(wang_hash(pixel_index + 1) * current_iteration);\n\n    float roughness = 1.0f / (bake_settings.texture_size_roughness - 1.0f) * y;\n    roughness = hippt::max(roughness, 1.0e-4f);\n\n    float cos_theta_o = 1.0f / (bake_settings.texture_size_cos_theta - 1.0f) * x;\n    cos_theta_o = hippt::max(GGX_DOT_PRODUCTS_CLAMP, cos_theta_o);\n    float sin_theta_o = sin(acos(cos_theta_o));\n\n    float3 local_view_direction = hippt::normalize(make_float3(cos(0.0f) * sin_theta_o, sin(0.0f) * sin_theta_o, cos_theta_o));\n\n    int iterations_per_kernel = floor(hippt::max(1.0f, (float)GPUBakerConstants::COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH / (float)(bake_settings.texture_size_cos_theta * bake_settings.texture_size_roughness)));\n    int nb_kernel_launch = ceil(bake_settings.integration_sample_count / (float)iterations_per_kernel);\n    int nb_samples = nb_kernel_launch * iterations_per_kernel;\n\n    for (int sample = 0; sample < kernel_iterations; sample++)\n    {\n        float3 sampled_local_to_light_direction = microfacet_GGX_sample_reflection(roughness, 0.0f, local_view_direction, random_number_generator);\n        if (sampled_local_to_light_direction.z < 0)\n            // Sampled direction below surface\n            continue;\n\n        HIPRTRenderData dummy_render_data;\n        // Just updating the masking shadowing term\n        dummy_render_data.bsdfs_data.GGX_masking_shadowing = bake_settings.masking_shadowing_term;\n\n        float eval_pdf;\n        float directional_albedo = torrance_sparrow_GGX_eval_reflect<0>(dummy_render_data,\n            roughness, 0.0f, false, /* fresnel */ ColorRGB32F(1.0f),\n            local_view_direction, sampled_local_to_light_direction, hippt::normalize(local_view_direction + sampled_local_to_light_direction), \n            eval_pdf, MaterialUtils::SPECULAR_PEAK_SAMPLED, 0).r;\n\n        directional_albedo /= eval_pdf;\n        
directional_albedo *= sampled_local_to_light_direction.z;\n\n        out_buffer[pixel_index] += directional_albedo / nb_samples;\n    }\n}\n"
  },
  {
    "path": "src/Device/kernels/Baking/GGXFresnelDirectionalAlbedo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/Baker/GGXFresnelDirectionalAlbedoSettings.h\"\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/BSDFs/Microfacet.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n /* References:\n * [1][Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n * [2][Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n * [3][Dassault Enterprise PBR 2025 Specification]\n * [4][Google - Physically Based Rendering in Filament]\n * [5][MaterialX codebase on Github]\n * [6][Blender's Cycles codebase on Github]\n *\n * This kernel computes the directional albedo of the Torrance Sparrow BRDF\n * with the GGX distribution with a varying fresnel term (i.e. a dielectric GGX lobe)\n */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXFresnelDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXFresnelDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXFresnelDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXFresnelDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n\n    const uint32_t pixel_index = (x + y * bake_settings.texture_size_cos_theta + z * bake_settings.texture_size_cos_theta * bake_settings.texture_size_roughness);\n\n    if (x >= bake_settings.texture_size_cos_theta || y >= bake_settings.texture_size_roughness || z >= bake_settings.texture_size_ior)\n        return;\n\n    Xorshift32Generator 
random_number_generator(wang_hash(pixel_index + 1) * current_iteration);\n\n    float roughness = 1.0f / (bake_settings.texture_size_roughness - 1.0f) * y;\n    roughness = hippt::max(roughness, 1.0e-4f);\n\n    float cos_theta_o = 1.0f / (bake_settings.texture_size_cos_theta - 1.0f) * x;\n    cos_theta_o = hippt::max(GGX_DOT_PRODUCTS_CLAMP, cos_theta_o);\n    cos_theta_o = powf(cos_theta_o, 2.5f);\n    float sin_theta_o = sin(acos(cos_theta_o));\n\n    // Integrates for interface reflectivities of IORs between 1.0f and 3.0f\n    float F0 = 1.0f / (bake_settings.texture_size_ior - 1.0f) * z;\n    // Relative eta (eta_t / eta_i) from F0\n    // Using F0^4 to get more precision near 0\n    F0 *= F0; // F0^2\n    F0 *= F0; // F0^4\n    float sqrt_F0 = sqrtf(hippt::clamp(0.0f, 0.99f, F0));\n    float relative_ior = (1.0f + sqrt_F0) / (1.0f - sqrt_F0);\n\n    float3 local_view_direction = hippt::normalize(make_float3(cos(0.0f) * sin_theta_o, sin(0.0f) * sin_theta_o, cos_theta_o));\n\n    int iterations_per_kernel = floor(hippt::max(1.0f, (float)GPUBakerConstants::COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH / (float)(bake_settings.texture_size_cos_theta * bake_settings.texture_size_roughness)));\n    int nb_kernel_launch = ceil(bake_settings.integration_sample_count / (float)iterations_per_kernel);\n    int nb_samples = nb_kernel_launch * iterations_per_kernel;\n\n    for (int sample = 0; sample < kernel_iterations; sample++)\n    {\n        float3 sampled_local_to_light_direction = microfacet_GGX_sample_reflection(roughness, 0.0f, local_view_direction, random_number_generator);\n        if (sampled_local_to_light_direction.z < 0)\n            // Sampled direction below surface\n            continue;\n\n        ColorRGB32F F = ColorRGB32F(full_fresnel_dielectric(sampled_local_to_light_direction.z, relative_ior));\n\n        HIPRTRenderData dummy_render_data;\n        // Just updating the masking shadowing term\n        dummy_render_data.bsdfs_data.GGX_masking_shadowing = 
bake_settings.masking_shadowing_term;\n\n        float eval_pdf;\n        float directional_albedo = torrance_sparrow_GGX_eval_reflect<0>(dummy_render_data, roughness, /* anisotropy */ 0.0f, false, F, \n            local_view_direction, sampled_local_to_light_direction, hippt::normalize(local_view_direction + sampled_local_to_light_direction), eval_pdf, \n            MaterialUtils::SPECULAR_PEAK_SAMPLED, 0).r;\n        directional_albedo /= eval_pdf;\n        directional_albedo *= sampled_local_to_light_direction.z;\n\n        out_buffer[pixel_index] += directional_albedo / nb_samples;\n    }\n}\n"
  },
  {
    "path": "src/Device/kernels/Baking/GGXGlassDirectionalAlbedo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/BSDFs/Principled.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include \"Renderer/Baker/GGXGlassDirectionalAlbedoSettings.h\"\n\n/* References:\n* [1][Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n* [2][Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n* [3][Dassault Enterprise PBR 2025 Specification]\n* [4][Google - Physically Based Rendering in Filament]\n* [5][MaterialX codebase on Github]\n* [6][Blender's Cycles codebase on Github]\n* \n* This kernel computes the directional albedo of a glass BSDF for use\n* in energy compensation code (MicrofacetEnergyCompensation.h) as proposed in\n* [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n* \n* The kernel outputs its results in two buffers (which are then written to disk as textures).\n* The two textures are parameterized by cos_theta_o (cosine view direction), the roughness of\n* the BSDF and the reflectance at normal incidence F0 which relates to the relative IOR at\n* the interface of the BSDF\n* \n* The first texture is the directional albedo precomputation when hitting the object\n* from the outside\n* The second texture is used when inside the object: its IOR is simply inversed\n*/\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float GGX_glass_E_eval(float relative_ior, float roughness, const float3& local_view_direction, const float3& local_to_light_direction, float& pdf, GGXMaskingShadowingFlavor masking_shadowing_term)\n{\n    pdf = 0.0f;\n\n    float NoV = local_view_direction.z;\n    float NoL = local_to_light_direction.z;\n\n    if (hippt::abs(NoL) < 1.0e-8f)\n        // Check to avoid dividing by 0 later on\n        return 0.0f;\n\n    // We're in the 
case of reflection if the view direction and the bounced ray (light direction) are in the same hemisphere\n    bool reflecting = NoL * NoV > 0;\n\n    if (hippt::abs(relative_ior - 1.0f) < 1.0e-5f)\n        relative_ior = 1.0f + 1.0e-5f;\n\n    // Computing the generalized (that takes refraction into account) half vector\n    float3 local_half_vector;\n    if (reflecting)\n        local_half_vector = local_to_light_direction + local_view_direction;\n    else\n        // We need to take the relative_ior into account when refracting to compute\n        // the half vector (this is the \"generalized\" part of the half vector computation)\n        local_half_vector = local_to_light_direction * relative_ior + local_view_direction;\n\n    local_half_vector = hippt::normalize(local_half_vector);\n    if (local_half_vector.z < 0.0f)\n        // Because the rest of the function we're going to compute here assume\n        // that the microfacet normal is in the same hemisphere as the surface\n        // normal, we're going to flip it if needed\n        local_half_vector = -local_half_vector;\n\n    float HoL = hippt::dot(local_to_light_direction, local_half_vector);\n    float HoV = hippt::dot(local_view_direction, local_half_vector);\n\n    if (HoL * NoL < 0.0f || HoV * NoV < 0.0f)\n        // Backfacing microfacets when the microfacet normal isn't in the same\n        // hemisphere as the view dir or light dir\n        return 0.0f;\n\n    float albedo;\n    float F = full_fresnel_dielectric(hippt::dot(local_view_direction, local_half_vector), relative_ior);\n    if (reflecting)\n    {\n        HIPRTRenderData render_data;\n        render_data.bsdfs_data.GGX_masking_shadowing = masking_shadowing_term;\n\n        albedo = torrance_sparrow_GGX_eval_reflect<0>(render_data, roughness, 0.0f, false, ColorRGB32F(F),                                   \n            local_view_direction, local_to_light_direction, local_half_vector, \n            pdf, 
MaterialUtils::SPECULAR_PEAK_SAMPLED, 0).r;\n\n        // Scaling the PDF by the probability of being here (reflection of the ray and not transmission)\n        pdf *= F;\n    }\n    else\n    {\n        float dot_prod = HoL + HoV / relative_ior;\n        float dot_prod2 = dot_prod * dot_prod;\n        float denom = dot_prod2 * NoL * NoV;\n\n        float alpha_x;\n        float alpha_y;\n        MaterialUtils::get_alphas(roughness, 0.0f, alpha_x, alpha_y);\n\n        float D = GGX_anisotropic(alpha_x, alpha_y, local_half_vector);\n        float G1_V = G1_Smith(alpha_x, alpha_y, local_view_direction);\n        float G1_L = G1_Smith(alpha_x, alpha_y, local_to_light_direction);\n        float G2 = G1_V * G1_L;\n\n        float dwm_dwi = hippt::abs(HoL) / dot_prod2;\n        float D_pdf = G1_V / hippt::abs(NoV) * D * hippt::abs(HoV);\n        pdf = dwm_dwi * D_pdf;\n        // Taking refraction probability into account\n        pdf *= (1.0f - F);\n\n        // We added a check a few lines above to \"avoid dividing by 0 later on\". This is where.\n        // When NoL is 0, denom is 0 too and we're dividing by 0. 
\n        // The PDF of this case is as low as 1.0e-9 (light direction sampled perpendicularly to the normal)\n        // so this is an extremely rare case.\n        // The PDF being non-zero, we could actually compute it, it's valid but not with floats :D\n        albedo = D * (1 - F) * G2 * hippt::abs(HoL * HoV / denom);\n    }\n\n    return albedo;\n}\n\n/**\n * The sampled direction is returned in the local shading frame of the basis used for 'local_view_direction'\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 GGX_glass_E_sample(float relative_ior, float roughness, const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    if (hippt::abs(relative_ior - 1.0f) < 1.0e-5f)\n        relative_ior = 1.0f + 1.0e-5f;\n\n    float alpha_x;\n    float alpha_y;\n    MaterialUtils::get_alphas(roughness, /* ignoring anisotropy */ 0.0f, alpha_x, alpha_y);\n    float3 microfacet_normal = GGX_anisotropic_sample_microfacet(local_view_direction, alpha_x, alpha_y, random_number_generator);\n\n    float F = full_fresnel_dielectric(hippt::dot(local_view_direction, microfacet_normal), relative_ior);\n    float rand_1 = random_number_generator();\n\n    float3 sampled_direction;\n    if (rand_1 < F)\n        // Reflection\n        sampled_direction = reflect_ray(local_view_direction, microfacet_normal);\n    else\n    {\n        // Refraction\n\n        if (hippt::dot(microfacet_normal, local_view_direction) < 0.0f)\n            // For the refraction operation that follows, we want the direction to refract (the view\n            // direction here) to be in the same hemisphere as the normal (the microfacet normal here)\n            // so we're flipping the microfacet normal in case it wasn't in the same hemisphere as\n            // the view direction\n            // Relative_eta has already been flipped above in the code\n            microfacet_normal = -microfacet_normal;\n\n        sampled_direction = refract_ray(local_view_direction, microfacet_normal, 
relative_ior);\n    }\n\n    return sampled_direction;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void glass_directional_albedo_integration(int kernel_iterations, int current_iteration, uint32_t x, uint32_t y, uint32_t z, uint32_t pixel_index, GGXGlassDirectionalAlbedoSettings bake_settings, float* out_buffer, bool exiting_surface)\n{\n    Xorshift32Generator random_number_generator(wang_hash(pixel_index + 1) * current_iteration);\n\n    float cos_theta_o = 1.0f / (bake_settings.texture_size_cos_theta_o - 1.0f) * x;\n    cos_theta_o = hippt::max(GGX_DOT_PRODUCTS_CLAMP, cos_theta_o);\n    cos_theta_o = powf(cos_theta_o, 2.5f);\n    float sin_theta_o = sin(acos(cos_theta_o));\n\n    float roughness = 1.0f / (bake_settings.texture_size_roughness - 1.0f) * y;\n    roughness = hippt::max(roughness, 1.0e-4f);\n\n    // Integrates for interface reflectivities of IORs between 1.0f and 3.0f\n    float F0 = 1.0f / (bake_settings.texture_size_ior - 1.0f) * z;\n    // Relative eta (eta_t / eta_i) from F0\n    // Using F0^4 to get more precision near 0\n    F0 *= F0; // F0^2\n    F0 *= F0; // F0^4\n    float sqrt_F0 = sqrtf(hippt::clamp(0.0f, 0.99f, F0));\n    float relative_ior = (1.0f + sqrt_F0) / (1.0f - sqrt_F0);\n\n    float3 local_view_direction = hippt::normalize(make_float3(cos(0.0f) * sin_theta_o, sin(0.0f) * sin_theta_o, cos_theta_o));\n\n    if (exiting_surface)\n        // Inverting the relative IOR in case we're inside the surface\n        relative_ior = 1.0f / relative_ior;\n\n    int iterations_per_kernel = floor(hippt::max(1.0f, GPUBakerConstants::COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH / static_cast<float>(bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness * bake_settings.texture_size_ior)));\n    int nb_kernel_launch = ceil(bake_settings.integration_sample_count / static_cast<float>(iterations_per_kernel));\n    int nb_samples = nb_kernel_launch * iterations_per_kernel;\n\n    for (int sample = 0; sample < kernel_iterations; sample++)\n    
{\n        float3 sampled_local_to_light_direction = GGX_glass_E_sample(relative_ior, roughness, local_view_direction, random_number_generator);\n\n        float eval_pdf;\n        float directional_albedo = GGX_glass_E_eval(relative_ior, roughness, local_view_direction, sampled_local_to_light_direction, eval_pdf, bake_settings.masking_shadowing_term);\n        if (eval_pdf == 0.0f)\n            continue;\n\n        directional_albedo /= eval_pdf;\n        directional_albedo *= hippt::abs(sampled_local_to_light_direction.z);\n\n        out_buffer[pixel_index] += directional_albedo / nb_samples;\n    }\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXGlassDirectionalAlbedoBakeEntering(int kernel_iterations, int current_iteration, GGXGlassDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXGlassDirectionalAlbedoBakeEntering(int kernel_iterations, int current_iteration, GGXGlassDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n\n    const uint32_t pixel_index = (x + y * bake_settings.texture_size_cos_theta_o + z * bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness);\n\n    if (x >= bake_settings.texture_size_cos_theta_o || y >= bake_settings.texture_size_roughness || z >= bake_settings.texture_size_ior)\n        return;\n\n    glass_directional_albedo_integration(kernel_iterations, current_iteration, x, y, z, pixel_index, bake_settings, out_buffer, false);\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXGlassDirectionalAlbedoBakeExiting(int kernel_iterations, int current_iteration, GGXGlassDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline 
GGXGlassDirectionalAlbedoBakeExiting(int kernel_iterations, int current_iteration, GGXGlassDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n\n    const uint32_t pixel_index = (x + y * bake_settings.texture_size_cos_theta_o + z * bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness);\n\n    if (x >= bake_settings.texture_size_cos_theta_o || y >= bake_settings.texture_size_roughness || z >= bake_settings.texture_size_ior)\n        return;\n\n    glass_directional_albedo_integration(kernel_iterations, current_iteration, x, y, z, pixel_index, bake_settings, out_buffer, true);\n}\n"
  },
  {
    "path": "src/Device/kernels/Baking/GGXThinGlassDirectionalAlbedo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/BSDFs/Principled.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include \"Renderer/Baker/GGXThinGlassDirectionalAlbedoSettings.h\"\n\n/* References:\n* [1][Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n* [2][Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n* [3][Dassault Enterprise PBR 2025 Specification]\n* [4][Google - Physically Based Rendering in Filament]\n* [5][MaterialX codebase on Github]\n* [6][Blender's Cycles codebase on Github]\n* \n* This kernel computes the directional albedo of a thin glass BSDF for use\n* in energy compensation code (MicrofacetEnergyCompensation.h) as proposed in\n* [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n* \n* The kernel outputs its results in one buffer (which is then written to disk as a texture).\n* The texture is parameterized by cos_theta_o (cosine view direction) and the roughness of\n* the thin BSDF and its IOR (F0 actually)\n*/\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 thin_glass_sample(float relative_eta, float roughness, const float3& local_view_direction, Xorshift32Generator& random_number_generator)\n{\n    // To avoid sampling directions that would lead to a null half_vector.\n    // Explained in more details in principled_glass_eval.\n    if (hippt::abs(relative_eta - 1.0f) < 1.0e-5f)\n        relative_eta = 1.0f + 1.0e-5f;\n\n    float alpha_x;\n    float alpha_y;\n    MaterialUtils::get_alphas(roughness, /* anisotropy */ 0.0f, alpha_x, alpha_y);\n\n    float3 microfacet_normal = GGX_anisotropic_sample_microfacet(local_view_direction, alpha_x, alpha_y, random_number_generator);\n\n    float HoV = hippt::dot(local_view_direction, 
microfacet_normal);\n    float F = full_fresnel_dielectric(HoV, relative_eta);\n\n    // Reference: Dielectric BSDF, PBR Book 4ed: https://pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF\n    // \n    // Adjusting fresnel reflectance for thin walled material but not above 0.1f roughness\n    // because above that, that scaling starts to be off (this scaling is only meant for roughness 0\n    // actually)\n    if (roughness < 0.1f)\n        F += hippt::square(1.0f - F) * F / (1.0f - hippt::square(F));\n\n    float rand_1 = random_number_generator();\n\n    float3 sampled_direction;\n    if (rand_1 < F)\n        // Reflection\n        sampled_direction = reflect_ray(local_view_direction, microfacet_normal);\n    else\n    {\n        // Refraction\n\n        if (hippt::dot(microfacet_normal, local_view_direction) < 0.0f)\n            // For the refraction operation that follows, we want the direction to refract (the view\n            // direction here) to be in the same hemisphere as the normal (the microfacet normal here)\n            // so we're flipping the microfacet normal in case it wasn't in the same hemisphere as\n            // the view direction\n            microfacet_normal = -microfacet_normal;\n\n        // Because the interface is thin (and so we refract twice, \"cancelling\" the bending the light),\n        // the refraction direction is just the incoming (view direction) reflected\n        // and flipped about the normal plane\n\n        float3 reflected = reflect_ray(local_view_direction, microfacet_normal);\n        // Now flipping\n        reflected.z *= -1.0f;\n\n        return reflected;\n    }\n\n    return sampled_direction;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float thin_glass_eval(float relative_eta, float roughness, const float3& local_view_direction, const float3& local_to_light_direction, float& pdf, GGXMaskingShadowingFlavor masking_shadowing_term)\n{\n    pdf = 0.0f;\n\n    float NoV = local_view_direction.z;\n    float NoL = 
local_to_light_direction.z;\n\n    if (hippt::abs(NoL) < 1.0e-8f)\n        // Check to avoid dividing by 0 later on\n        return 0.0f;\n\n    // We're in the case of reflection if the view direction and the bounced ray (light direction) are in the same hemisphere\n    bool reflecting = NoL * NoV > 0;\n\n    // relative_eta can be 1 when refracting from a volume into another volume of the same IOR.\n    // This in conjunction with the view direction and the light direction being the negative of\n    // one another will lead the microfacet normal to be the null vector which then causes\n    // NaNs.\n    // \n    // Example:\n    // The view and light direction can be the negative of one another when looking straight at a\n    // flat window for example. The view direction is aligned with the normal of the window\n    // in this configuration whereas the refracting light direction (and it is very likely to refract\n    // in this configuration) is going to point exactly away from the view direction and the normal.\n    // \n    // We then have\n    // \n    // half_vector  = light_dir + relative_eta * view_dir\n    //              = light_dir + 1.0f * view_dir\n    //              = light_dir + view_dir = (0, 0, 0)\n    //\n    // Normalizing this null vector then leads to NaNs because of the zero-length.\n    //\n    // We're setting relative_eta to 1.00001f to avoid this issue\n    if (hippt::abs(relative_eta - 1.0f) < 1.0e-5f)\n        relative_eta = 1.0f + 1.0e-5f;\n\n    // Computing the generalized (that takes refraction into account) half vector\n    float3 local_half_vector;\n    if (reflecting)\n        local_half_vector = local_to_light_direction + local_view_direction;\n    else\n        // Thin walled materials refract without light bending (because both refraction interfaces are simulated in one layer of material)\n        // just refract straight through i.e. 
light_direction = -view_direction\n        // It can be seen as simply flipping the z component of the light direction (the make_float3(1.0f, 1.0f, -1.0f) multiplication below) before computing the half vector\n        local_half_vector = local_to_light_direction * make_float3(1.0f, 1.0f, -1.0f) + local_view_direction;\n\n    local_half_vector = hippt::normalize(local_half_vector);\n    if (local_half_vector.z < 0.0f)\n        // Because the rest of the function we're going to compute here assume\n        // that the microfacet normal is in the same hemisphere as the surface\n        // normal, we're going to flip it if needed\n        local_half_vector = -local_half_vector;\n\n    float HoL = hippt::dot(local_to_light_direction, local_half_vector);\n    float HoV = hippt::dot(local_view_direction, local_half_vector);\n\n    if (HoL * NoL < 0.0f || HoV * NoV < 0.0f)\n        // Backfacing microfacets when the microfacet normal isn't in the same\n        // hemisphere as the view dir or light dir\n        return 0.0f;\n\n    float F = full_fresnel_dielectric(HoV, relative_eta);\n    // Reference: Dielectric BSDF, PBR Book 4ed: https://pbr-book.org/4ed/Reflection_Models/Dielectric_BSDF\n    // \n    // Adjusting fresnel reflectance for thin walled material but not above 0.1f roughness\n    // because above that, that scaling starts to be off (this scaling is only meant for roughness 0\n    // actually)\n    if (roughness < 0.1f)\n        F += hippt::square(1.0f - F) * F / (1.0f - hippt::square(F));\n\n    ColorRGB32F color;\n    if (reflecting)\n    {\n        HIPRTRenderData fake_render_data;\n        fake_render_data.bsdfs_data.GGX_masking_shadowing = masking_shadowing_term;\n\n        float color = torrance_sparrow_GGX_eval_reflect<0>(fake_render_data, roughness, /* anisotropy */ 0.0f, false,\n            ColorRGB32F(F), local_view_direction, local_to_light_direction, local_half_vector, pdf,\n            MaterialUtils::SPECULAR_PEAK_SAMPLED, 0).r;\n\n        // Scaling the PDF by the probability of being here (reflection of the ray and not transmission)\n        pdf *= F;\n\n        return color;\n    }\n    
else\n    {\n        float dot_prod = HoL + HoV / relative_eta;\n        float dot_prod2 = dot_prod * dot_prod;\n        float denom = dot_prod2 * NoL * NoV;\n\n        float alpha_x;\n        float alpha_y;\n        MaterialUtils::get_alphas(roughness, /* anisotropy */ 0.0f, alpha_x, alpha_y);\n\n        float D = GGX_anisotropic(alpha_x, alpha_y, local_half_vector);\n        float G1_V = G1_Smith(alpha_x, alpha_y, local_view_direction);\n        float G1_L = G1_Smith(alpha_x, alpha_y, local_to_light_direction);\n        float G2 = G1_V * G1_L;\n\n        float dwm_dwi = hippt::abs(HoL) / dot_prod2;\n        float D_pdf = G1_V / hippt::abs(NoV) * D * hippt::abs(HoV);\n        pdf = dwm_dwi * D_pdf;\n        // Taking refraction probability into account\n        pdf *= 1.0f - F;\n\n        // We added a check a few lines above to \"avoid dividing by 0 later on\". This is where.\n        // When NoL is 0, denom is 0 too and we're dividing by 0. \n        // The PDF of this case is as low as 1.0e-9 (light direction sampled perpendicularly to the normal)\n        // so this is an extremely rare case.\n        // The PDF being non-zero, we could actualy compute it, it's valid but not with floats :D\n        return D * (1.0f - F) * G2 * hippt::abs(HoL * HoV / denom);\n    }\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXThinGlassDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXThinGlassDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GGXThinGlassDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GGXThinGlassDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n\n    const uint32_t pixel_index = (x + y * 
bake_settings.texture_size_cos_theta_o + z * bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness);\n\n    if (x >= bake_settings.texture_size_cos_theta_o || y >= bake_settings.texture_size_roughness || z >= bake_settings.texture_size_ior)\n        return;\n\n    Xorshift32Generator random_number_generator(wang_hash(pixel_index + 1) * current_iteration);\n\n    float cos_theta_o = 1.0f / (bake_settings.texture_size_cos_theta_o - 1.0f) * x;\n    cos_theta_o = hippt::max(GGX_DOT_PRODUCTS_CLAMP, cos_theta_o);\n    //cos_theta_o = powf(cos_theta_o, 2.5f);\n    float sin_theta_o = sin(acos(cos_theta_o));\n\n    float roughness = 1.0f / (bake_settings.texture_size_roughness - 1.0f) * y;\n    roughness = hippt::max(roughness, 1.0e-4f);\n\n    // Integrates for interface reflectivities of IORs between 1.0f and 3.0f\n    float F0 = 1.0f / (bake_settings.texture_size_ior - 1.0f) * z;\n    // Relative eta (eta_t / eta_i) from F0\n    // Using F0^4 to get more precision near 0\n    F0 *= F0; // F0^2\n    F0 *= F0; // F0^4\n    float sqrt_F0 = sqrtf(hippt::clamp(0.0f, 0.99f, F0));\n    float relative_ior = (1.0f + sqrt_F0) / (1.0f - sqrt_F0);\n\n    float3 local_view_direction = hippt::normalize(make_float3(cos(0.0f) * sin_theta_o, sin(0.0f) * sin_theta_o, cos_theta_o));\n\n    int nb_kernel_launch = ceil(bake_settings.integration_sample_count / (float)kernel_iterations);\n    int nb_samples = nb_kernel_launch * kernel_iterations;\n\n    // Entering surface\n    for (int sample = 0; sample < kernel_iterations; sample++)\n    {\n        float thin_walled_roughness = MaterialUtils::get_thin_walled_roughness(true, roughness, relative_ior);\n        float3 sampled_local_to_light_direction = thin_glass_sample(relative_ior, thin_walled_roughness, local_view_direction, random_number_generator);\n\n        float eval_pdf = 0.0f;\n        float directional_albedo = thin_glass_eval(relative_ior, thin_walled_roughness, local_view_direction, 
sampled_local_to_light_direction, eval_pdf, bake_settings.masking_shadowing_term);\n        if (eval_pdf == 0.0f)\n            continue;\n\n        directional_albedo /= eval_pdf;\n        directional_albedo *= hippt::abs(sampled_local_to_light_direction.z);\n\n        out_buffer[pixel_index] += directional_albedo / nb_samples;\n    }\n}\n"
  },
  {
    "path": "src/Device/kernels/Baking/GlossyDielectricDirectionalAlbedo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n\n#include \"Device/includes/BSDFs/Lambertian.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/BSDFs/Microfacet.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include \"Renderer/Baker/GlossyDielectricDirectionalAlbedoSettings.h\"\n\n /* References:\n * [1][Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n * [2][Revisiting Physically Based Shading at Imageworks, Kulla & Conty, SIGGRAPH 2017]\n * [3][Dassault Enterprise PBR 2025 Specification]\n * [4][Google - Physically Based Rendering in Filament]\n * [5][MaterialX codebase on Github]\n * [6][Blender's Cycles codebase on Github]\n *\n * The kernel outputs its results in one buffer (which is then written to disk as a texture).\n * The texture is parameterized by cos_theta_o (cosine view direction), the roughness of\n * the specular GGX layer and its IOR\n */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) inline GlossyDielectricDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GlossyDielectricDirectionalAlbedoSettings bake_settings, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GlossyDielectricDirectionalAlbedoBake(int kernel_iterations, int current_iteration, GlossyDielectricDirectionalAlbedoSettings bake_settings, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n\n    const uint32_t pixel_index = (x + y * bake_settings.texture_size_cos_theta_o + z * bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness);\n\n    if (x >= bake_settings.texture_size_cos_theta_o || y >= 
bake_settings.texture_size_roughness || z >= bake_settings.texture_size_ior)\n        return;\n\n    Xorshift32Generator random_number_generator(wang_hash(pixel_index + 1)* current_iteration);\n\n    float cos_theta_o = 1.0f / (bake_settings.texture_size_cos_theta_o - 1.0f) * x;\n    cos_theta_o = hippt::max(GGX_DOT_PRODUCTS_CLAMP, cos_theta_o);\n    cos_theta_o = powf(cos_theta_o, 2.5f);\n    float sin_theta_o = sin(acos(cos_theta_o));\n\n    float roughness = 1.0f / (bake_settings.texture_size_roughness - 1.0f) * y;\n    roughness = hippt::max(roughness, 1.0e-4f);\n\n    // Integrates for interface reflectivities of IORs between 1.0f and 3.0f\n    float F0 = 1.0f / (bake_settings.texture_size_ior - 1.0f) * z;\n    // Relative eta (eta_t / eta_i) from F0\n    // Using F0^4 to get more precision near 0\n    F0 *= F0; // F0^2\n    F0 *= F0; // F0^4\n    float sqrt_F0 = sqrtf(hippt::clamp(0.0f, 0.99f, F0));\n    float relative_ior = (1.0f + sqrt_F0) / (1.0f - sqrt_F0);\n\n    float3 local_view_direction = hippt::normalize(make_float3(cos(0.0f) * sin_theta_o, sin(0.0f) * sin_theta_o, cos_theta_o));\n\n    int iterations_per_kernel = floor(hippt::max(1.0f, GPUBakerConstants::COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH / static_cast<float>(bake_settings.texture_size_cos_theta_o * bake_settings.texture_size_roughness * bake_settings.texture_size_ior)));\n    int nb_kernel_launch = ceil(bake_settings.integration_sample_count / static_cast<float>(iterations_per_kernel));\n    int nb_samples = nb_kernel_launch * iterations_per_kernel;\n\n    for (int sample = 0; sample < kernel_iterations; sample++)\n    {\n        // Sampling the specular GGX lobe or diffuse lobe\n        float rand_lobe = random_number_generator();\n        float3 sampled_local_to_light_direction;\n        if (rand_lobe < 0.5f)\n        {\n            // Sampling the specular lobe\n            sampled_local_to_light_direction = microfacet_GGX_sample_reflection(roughness, /* anisotropy */ 0.0f, 
local_view_direction, random_number_generator);\n\n            if (sampled_local_to_light_direction.z < 0)\n                // Sampled direction below surface, this can happen with microfacet\n                // sampling\n                continue;\n        }\n        else\n            // Sampling the diffuse lobe\n            sampled_local_to_light_direction = cosine_weighted_sample_z_up_frame(random_number_generator);\n\n        float3 microfacet_normal = hippt::normalize(local_view_direction + sampled_local_to_light_direction);\n        float total_pdf = 0.0f;\n\n        HIPRTRenderData render_data;\n        render_data.bsdfs_data.GGX_masking_shadowing = bake_settings.masking_shadowing_term;\n\n        float F = full_fresnel_dielectric(hippt::dot(microfacet_normal, sampled_local_to_light_direction), relative_ior);\n        float eval_pdf_specular;\n        float directional_albedo_specular = torrance_sparrow_GGX_eval_reflect<0>(render_data, roughness, /* aniso */ 0.0f, false, ColorRGB32F(F),\n            local_view_direction, sampled_local_to_light_direction, microfacet_normal, eval_pdf_specular, MaterialUtils::SPECULAR_PEAK_SAMPLED, 0).r;\n        // Multiplying the PDF by 0.5f because we have a 50% chance to sample the specular lobe\n        total_pdf += eval_pdf_specular * 0.5f;\n        float specular_layer_throughput = 1.0f;\n        specular_layer_throughput *= 1.0f - full_fresnel_dielectric(sampled_local_to_light_direction.z, relative_ior);\n        specular_layer_throughput *= 1.0f - full_fresnel_dielectric(local_view_direction.z, relative_ior);\n\n        // A material with the base color defined is the only thing needed for\n        // lambertian_brdf_eval()\n        DeviceUnpackedEffectiveMaterial mat;\n        mat.base_color = ColorRGB32F(1.0f);\n        float eval_pdf_diffuse;\n        float directional_albedo_diffuse = lambertian_brdf_eval(mat, sampled_local_to_light_direction.z, eval_pdf_diffuse).r;\n        // Multiplying the PDF by 0.5f because 
we have a 50% chance to sample the diffuse lobe\n        total_pdf += eval_pdf_diffuse * 0.5f;\n        // Only the fraction of light that got through the specular layer\n        // and that can get back to the viewer contributes to the illumination\n        // we get from the diffuse layer\n        directional_albedo_diffuse *= specular_layer_throughput;\n\n        float final_albedo = directional_albedo_specular + directional_albedo_diffuse;\n        final_albedo *= sampled_local_to_light_direction.z;\n        final_albedo /= total_pdf;\n\n        out_buffer[pixel_index] += final_albedo / nb_samples;\n    }\n\n#ifndef __KERNELCC__\n    // Some sanity checks on the CPU\n    float threshold = 1.1f;\n    if (out_buffer[pixel_index] > threshold || out_buffer[pixel_index] < 0 || std::isinf(out_buffer[pixel_index]) || std::isnan(out_buffer[pixel_index]))\n        std::cout << \"Error at x, y, z = [\" << x << \", \" << y << \", \" << z << \"]. Value = \" << out_buffer[pixel_index] << std::endl;\n#endif\n}\n"
  },
  {
    "path": "src/Device/kernels/CameraRays.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_CAMERA_RAY_H\n#define KERNELS_CAMERA_RAY_H\n\n#include \"Device/includes/AdaptiveSampling.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/ReSTIR/ReGIR/Representative.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void reset_render(const HIPRTRenderData& render_data, uint32_t pixel_index)\n{\n    if (render_data.aux_buffers.restir_gi_reservoir_buffer_1 != nullptr)\n    {\n        // Same for ReSTIR GI\n        if (render_data.aux_buffers.restir_gi_reservoir_buffer_1)\n        render_data.aux_buffers.restir_gi_reservoir_buffer_1[pixel_index] = ReSTIRGIReservoir();\n        \n        if (render_data.aux_buffers.restir_gi_reservoir_buffer_2)\n        render_data.aux_buffers.restir_gi_reservoir_buffer_2[pixel_index] = ReSTIRGIReservoir();\n        \n        if (render_data.aux_buffers.restir_gi_reservoir_buffer_3)\n        render_data.aux_buffers.restir_gi_reservoir_buffer_3[pixel_index] = ReSTIRGIReservoir();\n    }\n    \n    if (render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n    {\n        // These buffers are only available when either the adaptive sampling or the stop noise threshold is enabled\n        render_data.aux_buffers.pixel_sample_count[pixel_index] = 0;\n        render_data.aux_buffers.pixel_squared_luminance[pixel_index] = 0;\n        render_data.aux_buffers.pixel_converged_sample_count[pixel_index] = -1;\n    }\n    \n    // Resetting the G-Buffer\n    render_data.g_buffer.first_hit_prim_index[pixel_index] = -1;\n    render_data.g_buffer.geometric_normals[pixel_index] = 
Octahedral24BitNormalPadded32b::pack_static(make_float3(0.0f, 0.0f, 0.0f));\n    render_data.g_buffer.shading_normals[pixel_index] = Octahedral24BitNormalPadded32b::pack_static(make_float3(0.0f, 0.0f, 0.0f));\n    render_data.g_buffer.primary_hit_position[pixel_index] = make_float3(0.0f, 0.0f, 0.0f);\n    render_data.g_buffer.materials[pixel_index] = DevicePackedEffectiveMaterial::pack(DeviceUnpackedEffectiveMaterial());\n    \n    // Resetting the previous frame G-Buffer if we have it\n    if (render_data.render_settings.use_prev_frame_g_buffer())\n    {\n        render_data.g_buffer_prev_frame.first_hit_prim_index[pixel_index] = -1;\n        render_data.g_buffer_prev_frame.geometric_normals[pixel_index] = Octahedral24BitNormalPadded32b::pack_static(make_float3(0.0f, 0.0f, 0.0f));\n        render_data.g_buffer_prev_frame.shading_normals[pixel_index] = Octahedral24BitNormalPadded32b::pack_static(make_float3(0.0f, 0.0f, 0.0f));\n        render_data.g_buffer_prev_frame.primary_hit_position[pixel_index] = make_float3(0.0f, 0.0f, 0.0f);\n        render_data.g_buffer_prev_frame.materials[pixel_index] = DevicePackedEffectiveMaterial::pack(DeviceUnpackedEffectiveMaterial());\n    }\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void rescale_samples(HIPRTRenderData& render_data, uint32_t pixel_index)\n{\n    // Because when displaying the framebuffer, we're dividing by the number of samples to \n    // rescale the color of a pixel, we're going to have a problem if some pixels stopped samping\n    // at 10 samples while the other pixels are still being sampled and have 100 samples for example. 
\n    // The pixels that only received 10 samples are going to be divided by 100 at display time, making them\n    // appear too dark.\n    // We're rescaling the color of the pixels that stopped sampling here for correct display\n\n    float float_sample_number = static_cast<float>(render_data.render_settings.sample_number);\n    render_data.buffers.accumulated_ray_colors[pixel_index] = render_data.buffers.accumulated_ray_colors[pixel_index] / float_sample_number * (render_data.render_settings.sample_number + 1);\n    if (render_data.buffers.gmon_estimator.sets != nullptr)\n    {\n        int2 res = render_data.render_settings.render_resolution;\n        // GMoN is enabled, we're also going to scale the GMoN samples for the same reason\n        for (int set_index = 0; set_index < GMoNMSetsCount; set_index++)\n            // TODO this is slow\n            render_data.buffers.gmon_estimator.sets[set_index * res.x * res.y + pixel_index] = render_data.buffers.gmon_estimator.sets[set_index * res.x * res.y + pixel_index] / float_sample_number * (render_data.render_settings.sample_number + 1);\n    }\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) CameraRays(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline CameraRays(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (render_data.render_settings.need_to_reset)\n        reset_render(render_data, pixel_index);\n\n    // 'Render low resolution' means that the user is moving the camera for example\n    // so we're going to reduce the quality of the render for increased framerates\n    
// while moving\n    if (render_data.render_settings.do_render_low_resolution())\n    {\n        int res_scaling = render_data.render_settings.render_low_resolution_scaling;\n\n        // If rendering at low resolution, only one pixel out of res_scaling^2 will be rendered\n        if (x % res_scaling != 0 || y % res_scaling != 0)\n        {\n            render_data.aux_buffers.pixel_active[pixel_index] = false;\n\n            return;\n        }\n\n        pixel_index /= res_scaling;\n    }\n    \n    if (render_data.render_settings.use_prev_frame_g_buffer())\n    {\n        render_data.g_buffer_prev_frame.geometric_normals[pixel_index] = render_data.g_buffer.geometric_normals[pixel_index];\n        render_data.g_buffer_prev_frame.shading_normals[pixel_index] = render_data.g_buffer.shading_normals[pixel_index];\n        render_data.g_buffer_prev_frame.materials[pixel_index] = render_data.g_buffer.materials[pixel_index];\n        render_data.g_buffer_prev_frame.primary_hit_position[pixel_index] = render_data.g_buffer.primary_hit_position[pixel_index];\n        render_data.g_buffer_prev_frame.first_hit_prim_index[pixel_index] = render_data.g_buffer.first_hit_prim_index[pixel_index];\n    }\n\n    bool sampling_needed = true;\n    bool pixel_converged = false;\n    sampling_needed = adaptive_sampling(render_data, pixel_index, pixel_converged);\n    \n    if (pixel_converged || !sampling_needed)\n    {\n        if (render_data.render_settings.do_update_status_buffers)\n            // Updating if we have the right to (when do_update_status_buffers is true).\n            // do_update_status_buffers is only true on the last sample of a frame\n            // \n            // Indicating that this pixel has reached the threshold in render_settings.stop_noise_threshold\n            hippt::atomic_fetch_add(render_data.aux_buffers.pixel_count_converged_so_far, 1u);\n    }\n\n    if (render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n    {\n        if 
(!sampling_needed)\n        {\n            rescale_samples(render_data, pixel_index);\n\n            render_data.aux_buffers.pixel_active[pixel_index] = false;\n\n            return;\n        }\n        else\n            render_data.aux_buffers.pixel_sample_count[pixel_index]++;\n    }\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    // Direction to the center of the pixel\n    float x_ray_point_direction = (x + 0.5f);\n    float y_ray_point_direction = (y + 0.5f);\n    if (render_data.current_camera.do_jittering)\n    {\n        // Jitter randomly around the center\n        x_ray_point_direction += random_number_generator() - 0.5f;\n        y_ray_point_direction += random_number_generator() - 0.5f;\n    }\n\n    hiprtRay ray = render_data.current_camera.get_camera_ray(x_ray_point_direction, y_ray_point_direction, render_data.render_settings.render_resolution);\n    RayPayload ray_payload;\n\n    HitInfo closest_hit_info;\n    bool intersection_found = trace_main_path_ray(render_data, ray, ray_payload, closest_hit_info, /* camera ray = no previous primitive hit */ -1, /* bounce. 
Always 0 for camera rays*/ 0, random_number_generator);\n\n    if (intersection_found)\n    {\n        render_data.g_buffer.geometric_normals[pixel_index].pack(closest_hit_info.geometric_normal);\n        render_data.g_buffer.shading_normals[pixel_index].pack(closest_hit_info.shading_normal);\n\n        render_data.g_buffer.materials[pixel_index] = DevicePackedEffectiveMaterial::pack(ray_payload.material);\n        render_data.g_buffer.primary_hit_position[pixel_index] = closest_hit_info.inter_point;\n    }\n    else\n        // Special case when not hitting anything\n        //\n        // The view directions are reconstructed from the primary hit and the camera position\n        // but if we didn't hit anything, there's no primary hit. \n        // \n        // But we're still going to need to be able to reconstruct the view direction \n        // so we're faking the primary hit with the point the ray was directed to instead.\n        //\n        // If you're wondering: \"yeah but then the rest of the ray tracing passes are going to use a wrong primary hit position?\"\n        //      --> No because the 'first_hit_prim_index' indicates whether we have a primary hit or not.\n        //          If we don't have a primary hit, we're never going to use the float3 in the 'primary_hit_position'\n        //          buffer as an actual position, \n        render_data.g_buffer.primary_hit_position[pixel_index] = ray.origin + ray.direction;\n        \n    render_data.g_buffer.first_hit_prim_index[pixel_index] = intersection_found ? 
closest_hit_info.primitive_index : -1;\n    render_data.aux_buffers.pixel_active[pixel_index] = true;\n\n    ReGIR_update_representative_data(render_data, closest_hit_info.inter_point, closest_hit_info.geometric_normal, render_data.current_camera, closest_hit_info.primitive_index, true, ray_payload.material);\n\n    // If we got here, this means that we still have at least one ray active\n    if (render_data.render_settings.do_update_status_buffers)\n    {\n        // Updating if we have the right to (when do_update_status_buffers is true).\n        // do_update_status_buffers is only true on the last sample of a frame\n        render_data.aux_buffers.still_one_ray_active[0] = 1;\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/Experimentations/RegistersTest.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n/**\n * This kernel is a playground for understanding what kind of optimizations the GPU compiler is able to do on variable usage ---> register pressure.\n * \n * This allows the validation of simple intuitions such as: \"The compiler optimizes away unused variables\". \n * \n * But what if the variable is passed in a function that itself doesn't use it? Try it out: the compiler optimizes it away too.\n * Fun fact: the variable is also optimized if used but not initialized.\n * \n * You get the idea.\n */\n\n/**\n * Here's a rundown of all that I tested already:\n * \n * - Unused variable: optimized away, no register cost.\n * - Variable passed to a function that doesn't use it: optimized away, no register cost.\n * - Precomputing a result in a temporary variable to avoid recomputing many times: no register cost. \n *      This must be because using a temporary variable or not, the result of the calculation must be in\n *      a register anyway so it's only 1 register in both cases\n * - Two different variables equal to the same value: only using one register\n * - Variable declared in a structure. 
That structure is passed to a function that doesn't use the variable ---> variable optimized away.\n */\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Xorshift.h\"\n\nHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n{\n    seed = (seed ^ 61) ^ (seed >> 16);\n    seed *= 9;\n    seed = seed ^ (seed >> 4);\n    seed *= 0x27d4eb2d;\n    seed = seed ^ (seed >> 15);\n    return seed;\n}\n\n//struct DataStruct\n//{\n//    unsigned char a, b, c, d;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) ^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    return (int)data.a % (int)data.b + (int)data.c + (int)data.d;\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    DataStruct data;\n//\n//    unsigned char  rand_a = (unsigned char)randomGenerator.xorshift32();\n//    unsigned char  rand_b = (unsigned char)randomGenerator.xorshift32();\n//    unsigned char  rand_c = (unsigned char)randomGenerator.xorshift32();\n//    unsigned char  rand_d = (unsigned 
char)randomGenerator.xorshift32();\n//\n//    data.a = rand_a;\n//    data.b = rand_b;\n//    data.c = rand_c;\n//    data.d = rand_d;\n//\n//    int result = sumFunction(data);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n// 9 registers\n\n\n\n\n\n\n\n\n//struct DataStruct\n//{\n//    int packed;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) ^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    return  ((data.packed >> 0) | 0b11111111) % // a \n//            ((data.packed >> 8) | 0b11111111) + // b\n//            ((data.packed >> 16) | 0b11111111) + // c\n//            ((data.packed >> 24) | 0b11111111); // d\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    DataStruct data;\n//\n//    data.packed = (int)randomGenerator.xorshift32();\n//    int result = sumFunction(data);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n// 7 registers\n\n\n\n\n\n\n\n\n\n//struct DataStruct\n//{\n//    short int a;\n//    short int b;\n//    short int c;\n//    short int d;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) 
^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    return data.a % data.b + data.c + data.d;\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    DataStruct data;\n//    data.a = (short int)randomGenerator.xorshift32();\n//    data.b = (short int)randomGenerator.xorshift32();\n//    data.c = (short int)randomGenerator.xorshift32();\n//    data.d = (short int)randomGenerator.xorshift32();\n//    int result = sumFunction(data);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n//// 10 registers\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n//struct DataStruct\n//{\n//    int packed1;\n//    int packed2;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) ^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    return  ((data.packed1 >> 0) | 0xFFFF) % ((data.packed1 >> 16) | 0xFFFF) + ((data.packed2 >> 0) | 0xFFFF) + ((data.packed2 >> 16) | 0xFFFF);\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData 
render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    DataStruct data;\n//    data.packed1 = randomGenerator.xorshift32();\n//    data.packed2 = randomGenerator.xorshift32();\n//    int result = sumFunction(data);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n// 7 registers\n\n\n\n\n\n\n\n\n\n//struct DataStruct\n//{\n//    int a;\n//    int b;\n//    int c;\n//    int d;\n//    int e;\n//    int f;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) ^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    return data.a % data.b + data.c + data.d + data.e + data.f;\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    
DataStruct data;\n//    data.a = randomGenerator.xorshift32();\n//    data.b = randomGenerator.xorshift32();\n//    data.c = randomGenerator.xorshift32();\n//    data.d = randomGenerator.xorshift32();\n//    data.e = randomGenerator.xorshift32();\n//    data.f = randomGenerator.xorshift32();\n//    int result = sumFunction(data);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n// 10 registers\n\n\n\n\n\n\n//struct DataStruct\n//{\n//    int ab;\n//    int cd;\n//    int ef;\n//};\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE unsigned int wang_hash(unsigned int seed)\n//{\n//    seed = (seed ^ 61) ^ (seed >> 16);\n//    seed *= 9;\n//    seed = seed ^ (seed >> 4);\n//    seed *= 0x27d4eb2d;\n//    seed = seed ^ (seed >> 15);\n//    return seed;\n//}\n//\n//HIPRT_HOST_DEVICE HIPRT_INLINE int sumFunction(DataStruct& data)\n//{\n//    int a = data.ab & (0xFFFF << 0);\n//    int b = data.ab & (0xFFFF << 16);\n//    int c = data.cd & (0xFFFF << 0);\n//    int d = data.cd & (0xFFFF << 16);\n//    int e = data.ef & (0xFFFF << 0);\n//    int f = data.ef & (0xFFFF << 16);\n//    return a % b + c + d + e + f;\n//}\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    DataStruct data;\n//    data.ab = randomGenerator.xorshift32();\n//    data.cd = randomGenerator.xorshift32();\n//    data.ef = randomGenerator.xorshift32();\n//    int result = sumFunction(data);\n//\n//    
render_data.buffers.pixels[threadId] = ColorRGB32F(result);\n//}\n\n//#include \"Device/includes/Dispatcher.h\"\n//\n//#ifdef __KERNELCC__\n//GLOBAL_KERNEL_SIGNATURE(void) TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera)\n//#else\n//GLOBAL_KERNEL_SIGNATURE(void) inline TestFunction(HIPRTRenderData render_data, int2 res, HIPRTCamera camera, int x, int y)\n//#endif\n//{\n//#ifdef __KERNELCC__\n//    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n//    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n//#endif\n//    const uint32_t threadId = (x + y * res.x);\n//\n//    if (threadId >= res.x * res.y)\n//        return;\n//\n//    Xorshift32Generator randomGenerator(wang_hash(threadId + 1));\n//\n//    float pdf;\n//    int mat_index = (int)(threadId * randomGenerator() * 50);\n//    DeviceTexturedMaterial mat = render_data.buffers.materials_buffer[(int)(threadId * randomGenerator() * 50) % 10];\n//    ColorRGB32F eval_out = bsdf_dispatcher_eval(render_data.buffers.materials_buffer, mat, render_data.g_buffer.ray_volume_states[threadId], make_float3(0.5, 1.0, 2), make_float3(0.5, 1.0, 2), make_float3(0.5, 1.0, 2), pdf);\n//\n//    int incident, outgoing;\n//    bool leaving;\n//    render_data.g_buffer.ray_volume_states[threadId].interior_stack.push(incident, outgoing, leaving, mat_index, render_data.buffers.materials_buffer[mat_index].dielectric_priority);\n//    render_data.g_buffer.ray_volume_states[threadId].interior_stack.push(incident, outgoing, leaving, mat_index + 5, render_data.buffers.materials_buffer[mat_index + 5].dielectric_priority);\n//    render_data.g_buffer.ray_volume_states[threadId].interior_stack.push(incident, outgoing, leaving, mat_index * 5, render_data.buffers.materials_buffer[mat_index * 5].dielectric_priority);\n//    render_data.g_buffer.ray_volume_states[threadId].interior_stack.push(incident, outgoing, leaving, mat_index * 25, render_data.buffers.materials_buffer[mat_index * 
25].dielectric_priority);\n//\n//    render_data.buffers.pixels[threadId] = ColorRGB32F(render_data.g_buffer.ray_volume_states[threadId].interior_stack.stack_entries[1].odd_parity) * eval_out;\n//}\n"
  },
  {
    "path": "src/Device/kernels/Experimentations/Test3DTexture.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n /**\n  * Kernel for testing that creating and reading from a 3D texture happens correctly.\n  * The 3D texture is just written to a linear buffer and the linear buffer is then\n  * expected to contain the data of the texture, basically just a copy of it.\n  */\n\n#include \"Device/includes/FixIntellisense.h\"\n\n#include <Orochi/Orochi.h>\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) Test3DTexture(oroTextureObject_t texture_3D, int tex_size, float* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) Test3DTexture(oroTextureObject_t texture_3D, int tex_size, float* out_buffer, int x, int y, int z)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n    const uint32_t z = blockIdx.z * blockDim.z + threadIdx.z;\n#endif\n    if (x >= tex_size || y >= tex_size || z >= tex_size)\n        return;\n\n    const uint32_t thread_index = (x + y * tex_size + z * tex_size * tex_size);\n\n    out_buffer[thread_index * 4] = tex3D<float4>(texture_3D, x + 0.35f, y + 0.35f, z + 0.35f).y;\n}\n"
  },
  {
    "path": "src/Device/kernels/Experimentations/TestCopyKernelAlignment.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n /**\n  * Kernel for testing that creating and reading from a 3D texture happens correctly.\n  * The 3D texture is just written to a linear buffer and the linear buffer is then\n  * expected to contain the data of the texture, basically just a copy of it.\n  */\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"HostDeviceCommon/Color.h\"\n\n#include <Orochi/Orochi.h>\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelAlignment(ColorRGB32F* __restrict__ buffer_a, const ColorRGB32F* __restrict__ buffer_b, size_t buffer_size)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelAlignment(ColorRGB32F* __restrict__ buffer_a, const ColorRGB32F* __restrict__ buffer_b, size_t buffer_size, int x)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n    uint32_t offset = 13;\n    uint32_t index = x + offset;\n\n    if (index >= buffer_size)\n        return;\n\n    buffer_a[index] = buffer_b[index];\n}\n"
  },
  {
    "path": "src/Device/kernels/Experimentations/TestCopyKernelRestrict.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n /**\n  * Kernel for testing that creating and reading from a 3D texture happens correctly.\n  * The 3D texture is just written to a linear buffer and the linear buffer is then\n  * expected to contain the data of the texture, basically just a copy of it.\n  */\n\n#include \"Device/includes/FixIntellisense.h\"\n\n#include <Orochi/Orochi.h>\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelRestrict(float* buffer_a, float* buffer_b, float* buffer_c, float* buffer_d, size_t buffer_size)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelRestrict(float* __restrict__ buffer_a, const float* __restrict__ buffer_b, float* __restrict__ buffer_c, float* __restrict__ buffer_d, size_t buffer_size, int x)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n    if (x >= buffer_size)\n        return;\n\n    buffer_a[x] = buffer_a[x] + buffer_b[x];\n    buffer_d[x] = buffer_a[x] * buffer_b[x];\n    buffer_d[x] *= buffer_c[x];\n}\n"
  },
  {
    "path": "src/Device/kernels/Experimentations/TestCopyKernelSimple.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n\n#include <Orochi/Orochi.h>\n\nusing TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE = DeviceUnpackedEffectiveMaterial;\n\nstruct TestCopyKernelSimpleInputData\n{\n    TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE* buffer_a;\n    TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE* buffer_b;\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void copy_function(const TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE* __restrict__ input_buffer, TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE* __restrict__ output_buffer, uint32_t tIdx)\n{\n    output_buffer[tIdx] = input_buffer[tIdx];\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelSimple(TestCopyKernelSimpleInputData input, size_t buffer_size)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) TestCopyKernelSimple(TestCopyKernelSimpleInputData input, size_t buffer_size, int x)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n    uint32_t offset = 13;\n    uint32_t index = x + offset;\n\n    if (index >= buffer_size)\n        return;\n\n    copy_function(input.buffer_b, input.buffer_a, index);\n}\n"
  },
  {
    "path": "src/Device/kernels/GMoN/GMoNComputeMedianOfMeans.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_GMON_H\n#define KERNELS_GMON_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/GMoN/GMoN.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n/**\n * Kernel for the implementation of GMoN\n *\n * Reference:\n * [1] [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\n */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) GMoNComputeMedianOfMeans(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline GMoNComputeMedianOfMeans(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (render_data.render_settings.sample_number == 0)\n    {\n        // For sample 0, this is a special case where we're just going to\n        // copy the current pixel color (which is only 1 sample accumulated)\n        // to the output framebuffer such that we don't get a black\n        // viewport while the full GMoN median of means computation\n        // hasn't been launched\n        render_data.buffers.gmon_estimator.result_framebuffer[pixel_index] = render_data.buffers.accumulated_ray_colors[pixel_index];\n\n        return;\n    }\n\n    ColorRGB32F GMoN_color = gmon_compute_median_of_means(render_data.buffers.gmon_estimator, pixel_index, render_data.render_settings.sample_number, render_data.render_settings.render_resolution);\n\n    render_data.buffers.gmon_estimator.result_framebuffer[pixel_index] = GMoN_color;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/Megakernel.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_MEGAKERNEL_H\n#define KERNELS_MEGAKERNEL_H\n\n#include \"Device/includes/AdaptiveSampling.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/LightSampling/Lights.h\"\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Material.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) MegaKernel(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline MegaKernel(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (!render_data.aux_buffers.pixel_active[pixel_index])\n        return;\n\n    if (render_data.render_settings.do_render_low_resolution())\n        // Reducing the number of bounces to 3 if rendering at low resolution\n        // for better interactivity\n        render_data.render_settings.nb_bounces = hippt::min(3, render_data.render_settings.nb_bounces);\n\n#if ViewportColorOverriden == 1\n    // If some kernel option is going to debug some color in the viewport,\n    // then we're clearing the viewport buffer here\n    render_data.buffers.accumulated_ray_colors[pixel_index] = ColorRGB32F();\n#endif\n\n\n    unsigned int seed;\n    if 
(render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    // Initializing the closest hit info the information from the camera ray pass\n    HitInfo closest_hit_info;\n    closest_hit_info.inter_point = render_data.g_buffer.primary_hit_position[pixel_index];\n    closest_hit_info.geometric_normal = hippt::normalize(render_data.g_buffer.geometric_normals[pixel_index].unpack());\n    closest_hit_info.shading_normal = hippt::normalize(render_data.g_buffer.shading_normals[pixel_index].unpack());\n    closest_hit_info.primitive_index = render_data.g_buffer.first_hit_prim_index[pixel_index];\n\n    // Initializing the ray with the information from the camera ray pass\n    hiprtRay ray;\n    ray.direction = hippt::normalize(-render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index));\n    RayPayload ray_payload;\n    ray_payload.next_ray_state = RayState::BOUNCE;\n    ray_payload.material = render_data.g_buffer.materials[pixel_index].unpack();\n\n    // Because this is the camera hit (and assuming the camera isn't inside volumes for now),\n    // the ray volume state after the camera hit is just an empty interior stack but with\n    // the material index that we hit pushed onto the stack. That's it. 
Because it is that\n    // simple, we don't have the ray volume state in the GBuffer but rather we can\n    // reconstruct the ray volume state on the fly\n    ray_payload.volume_state.reconstruct_first_hit(\n        ray_payload.material,\n        render_data.buffers.material_indices,\n        closest_hit_info.primitive_index,\n        random_number_generator);\n\n    // + 1 to nb_bounces here because we want \"0\" bounces to still act as one\n    // hit and to return some color\n    bool intersection_found = closest_hit_info.primitive_index != -1;\n\n    for (int& bounce = ray_payload.bounce; bounce < render_data.render_settings.nb_bounces + 1; bounce++)\n    {\n        if (ray_payload.next_ray_state != RayState::MISSED)\n        {\n            if (bounce > 0)\n                intersection_found = path_tracing_find_indirect_bounce_intersection(render_data, ray, ray_payload, closest_hit_info, random_number_generator);\n\n            if (intersection_found)\n            {\n                if (bounce == 0)\n                    store_denoiser_AOVs(render_data, pixel_index, closest_hit_info.shading_normal, ray_payload.material.base_color);\n                else if (bounce > 0)\n                {\n\t\t\t\t\tbool ReGIR_primary_hit = render_data.render_settings.regir_settings.compute_is_primary_hit(ray_payload);\n\n                    // Storing data for ReGIR representative points\n                    ReGIR_update_representative_data(render_data, closest_hit_info.inter_point, closest_hit_info.geometric_normal, render_data.current_camera, closest_hit_info.primitive_index, ReGIR_primary_hit, ray_payload.material);\n                }\n\n                // TODO REMOVE THE DEBUG IF\n                if (bounce > 0 || render_data.render_settings.enable_direct)\n                {\n                    ray_payload.ray_color += estimate_direct_lighting(render_data, ray_payload, closest_hit_info, -ray.direction, x, y, random_number_generator);\n\n                    
sanity_check<true>(render_data, ray_payload.ray_color, x, y);\n                }\n\n                BSDFIncidentLightInfo sampled_light_info; // This variable is never used, this is just for debugging on the CPU so that we know what the BSDF sampled\n                bool valid_indirect_bounce = path_tracing_compute_next_indirect_bounce(render_data, ray_payload, closest_hit_info, -ray.direction, ray, random_number_generator, &sampled_light_info);\n                if (!valid_indirect_bounce)\n                    // Bad BSDF sample (under the surface), killed by russian roulette, ...\n                    break;\n            }\n            else\n            {\n                ray_payload.ray_color += path_tracing_miss_gather_envmap(render_data, ray_payload, ray.direction, pixel_index);\n                ray_payload.next_ray_state = RayState::MISSED;\n\n                sanity_check<true>(render_data, ray_payload.ray_color, x, y);\n            }\n        }\n        else if (ray_payload.next_ray_state == RayState::MISSED)\n            break;\n    }\n\n    // Checking for NaNs / negative value samples. Output \n    if (!sanity_check(render_data, ray_payload.ray_color, x, y))\n        return;\n\n    // If we got here, this means that we still have at least one ray active\n    // This is a concurrent write by the way but we don't really care, everyone is writing\n    // the same value\n    render_data.aux_buffers.still_one_ray_active[0] = 1;\n\n    path_tracing_accumulate_debug_view_color(render_data, ray_payload, pixel_index, random_number_generator);\n    path_tracing_accumulate_color(render_data, ray_payload.ray_color, pixel_index);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/NEE++/GridPrepopulate.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_NEE_PLUS_PLUS_GRID_PREPOPULATE_H\n#define KERNELS_NEE_PLUS_PLUS_GRID_PREPOPULATE_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/LightSampling/Lights.h\"\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Material.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/Sampling.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\nHIPRT_DEVICE void accumulate_NEE_plus_plus(HIPRTRenderData& render_data, const hiprtRay& ray, const HitInfo& closest_hit_info, RayPayload& ray_payload, Xorshift32Generator& random_number_generator)\n{\n    // Just making sure that this is not set to false\n    render_data.nee_plus_plus.m_update_visibility_map = true;\n\n    for (int sample = 0; sample < 100; sample++)\n    {\n        constexpr int SAMPLING_STRATEGY = DirectLightSamplingBaseStrategy == LSS_BASE_REGIR ? 
ReGIR_GridFillLightSamplingBaseStrategy : DirectLightSamplingBaseStrategy;\n\n        LightSampleInformation light_sample = sample_one_emissive_triangle<SAMPLING_STRATEGY>(render_data,\n            closest_hit_info.inter_point, -ray.direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal,\n            closest_hit_info.primitive_index, ray_payload,\n            random_number_generator);\n\n        if (light_sample.area_measure_pdf <= 0.0f)\n            // Can happen for very small triangles\n            continue;\n\n        float3 shadow_ray_origin = closest_hit_info.inter_point;\n        float3 shadow_ray_direction = light_sample.point_on_light - shadow_ray_origin;\n        float distance_to_light = hippt::length(shadow_ray_direction);\n        float3 shadow_ray_direction_normalized = shadow_ray_direction / distance_to_light;\n\n        hiprtRay shadow_ray;\n        shadow_ray.origin = shadow_ray_origin;\n        shadow_ray.direction = shadow_ray_direction_normalized;\n\n        ColorRGB32F light_source_radiance;\n        // abs() here to allow backfacing light sources\n        float dot_light_source = compute_cosine_term_at_light_source(light_sample.light_source_normal, -shadow_ray.direction);\n        if (dot_light_source > 0.0f)\n        {\n            NEEPlusPlusContext nee_plus_plus_context;\n            nee_plus_plus_context.point_on_light = light_sample.point_on_light;\n            nee_plus_plus_context.shaded_point = shadow_ray_origin;\n            bool in_shadow = evaluate_shadow_ray_nee_plus_plus(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, nee_plus_plus_context, random_number_generator, ray_payload.bounce);\n        }\n    }\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) NEEPlusPlus_Grid_Prepopulate(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline NEEPlusPlus_Grid_Prepopulate(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef 
__KERNELCC__\n    const uint32_t x = (blockIdx.x * blockDim.x + threadIdx.x) * ReGIR_GridPrepopulationResolutionDownscale;\n    const uint32_t y = (blockIdx.y * blockDim.y + threadIdx.y) * ReGIR_GridPrepopulationResolutionDownscale;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n    Xorshift32Generator random_number_generator(seed);\n\n    // Direction to the center of the pixel\n    float x_ray_point_direction = (x + 0.5f);\n    float y_ray_point_direction = (y + 0.5f);\n    if (render_data.current_camera.do_jittering)\n    {\n        // Jitter randomly around the center\n        x_ray_point_direction += random_number_generator() - 0.5f;\n        y_ray_point_direction += random_number_generator() - 0.5f;\n    }\n\n    hiprtRay ray = render_data.current_camera.get_camera_ray(x_ray_point_direction, y_ray_point_direction, render_data.render_settings.render_resolution);\n    RayPayload ray_payload;\n\n    HitInfo closest_hit_info;\n    bool intersection_found = trace_main_path_ray(render_data, ray, ray_payload, closest_hit_info, /* camera ray = no previous primitive hit */ -1, /* bounce. 
Always 0 for camera rays*/ 0, random_number_generator);\n\n    if (!intersection_found)\n        return;\n\n    for (int& bounce = ray_payload.bounce; bounce < render_data.render_settings.nb_bounces + 1; bounce++)\n    {\n        if (ray_payload.next_ray_state != RayState::MISSED)\n        {\n            if (bounce > 0)\n                intersection_found = path_tracing_find_indirect_bounce_intersection(render_data, ray, ray_payload, closest_hit_info, random_number_generator);\n\n            if (intersection_found)\n            {\n                accumulate_NEE_plus_plus(render_data, ray, closest_hit_info, ray_payload, random_number_generator);\n\n                BSDFIncidentLightInfo sampled_light_info; // This variable is never used, this is just for debugging on the CPU so that we know what the BSDF sampled\n                bool valid_indirect_bounce = path_tracing_compute_next_indirect_bounce(render_data, ray_payload, closest_hit_info, -ray.direction, ray, random_number_generator, &sampled_light_info);\n                if (!valid_indirect_bounce)\n                    // Bad BSDF sample (under the surface), killed by russian roulette, ...\n                    break;\n            }\n            else\n                return;\n        }\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/NEE++/NEEPlusPlusFinalizeAccumulation.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_NEE_PLUS_PLUS_FINALIZE_ACCUMULATION_H\n#define KERNELS_NEE_PLUS_PLUS_FINALIZE_ACCUMULATION_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/NEE++/NEE++.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) NEEPlusPlusFinalizeAccumulation(NEEPlusPlusDevice nee_plus_plus_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline NEEPlusPlusFinalizeAccumulation(NEEPlusPlusDevice nee_plus_plus_data, int x)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n    uint32_t pixel_index = x;\n    if (x >= nee_plus_plus_data.m_total_number_of_cells)\n        return;\n\n    // nee_plus_plus_data.copy_accumulation_buffers(pixel_index);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DI/FusedSpatiotemporalReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIOTEMPORAL_REUSE_H\n#define DEVICE_RESTIR_DI_SPATIOTEMPORAL_REUSE_H\n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/SpatialMISWeight.h\"\n#include \"Device/includes/ReSTIR/SpatiotemporalMISWeight.h\"\n#include \"Device/includes/ReSTIR/SpatiotemporalNormalizationWeight.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n#include \"Device/includes/ReSTIR/UtilsTemporal.h\"\n#include \"Device/includes/Sampling.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n *\n * [1] [Spatiotemporal reservoir resampling for real-time ray tracing with dynamic direct lighting] https://research.nvidia.com/labs/rtr/publication/bitterli2020spatiotemporal/\n * [2] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n * [3] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time - SIGGRAPH 2023 Presentation Video] https://dl.acm.org/doi/10.1145/3587423.3595511#sec-supp\n * [4] [NVIDIA RTX DI SDK - Github] https://github.com/NVIDIAGameWorks/RTXDI\n * [5] [Generalized Resampled Importance Sampling Foundations of ReSTIR] https://research.nvidia.com/publication/2022-07_generalized-resampled-importance-sampling-foundations-restir\n * [6] [Uniform disk sampling] https://rh8liuqy.github.io/Uniform_Disk.html\n * [7] [Reddit Post for the Jacobian term needed] 
https://www.reddit.com/r/GraphicsProgramming/comments/1eo5hqr/restir_di_light_sample_pdf_confusion/\n * [8] [Rearchitecting Spatiotemporal Resampling for Production] https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production\n */\n\n#define TEMPORAL_NEIGHBOR_ID 0\n\nHIPRT_HOST_DEVICE HIPRT_INLINE bool do_include_spatial_visibility_term_or_not(const HIPRTRenderData& render_data, int current_neighbor_index)\n{\n\tconst ReSTIRCommonSpatialPassSettings& spatial_settings = render_data.render_settings.restir_di_settings.common_spatial_pass;\n\tbool visibility_only_on_last_pass = spatial_settings.do_visibility_only_last_pass;\n\tbool is_last_pass = spatial_settings.spatial_pass_index == spatial_settings.number_of_passes - 1;\n\n\t// Only using the visibility term on the last pass if so desired\n\tbool include_target_function_visibility = visibility_only_on_last_pass && is_last_pass;\n\t// Also allowing visibility if we want it at every pass\n\tinclude_target_function_visibility |= !spatial_settings.do_visibility_only_last_pass;\n\n\t// Only doing visibility for a few neighbors depending on 'neighbor_visibility_count'\n\tinclude_target_function_visibility &= current_neighbor_index < spatial_settings.neighbor_visibility_count;\n\n\t// Only doing visibility if we want it at all\n\tinclude_target_function_visibility &= ReSTIR_DI_SpatialTargetFunctionVisibility;\n\n\t// We don't want visibility for the center pixel because we're going to reuse the\n\t// target function stored in the reservoir anyways\n\t// Note: the center pixel has index 'spatial_settings.reuse_neighbor_count'\n\t// while actual *neighbors* have index between [0, spatial_settings.reuse_neighbor_count - 1]\n\tinclude_target_function_visibility &= current_neighbor_index != spatial_settings.reuse_neighbor_count;\n\n\treturn include_target_function_visibility;\n}\n\n/**\n * Returns -1 if there is no valid temporal neighbor.\n * The linear buffer index of the temporal 
neighbor otherwise\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE int3 load_spatiotemporal_neighbor_data(const HIPRTRenderData& render_data, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t  const ReSTIRSurface& center_pixel_surface, int center_pixel_index, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t  ReSTIRDIReservoir& out_temporal_neighbor_reservoir, ReSTIRSurface& out_temporal_neighbor_surface, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t  Xorshift32Generator& random_number_generator)\n{\n\tint3 temporal_neighbor_pixel_index_and_pos = find_temporal_neighbor_index<false>(render_data,\n\t\trender_data.g_buffer.primary_hit_position[center_pixel_index], center_pixel_surface.shading_normal, center_pixel_index, random_number_generator);\n\tif (temporal_neighbor_pixel_index_and_pos.x == -1 || render_data.render_settings.freeze_random)\n\t\t// Temporal occlusion / disoccusion --> temporal neighbor is invalid,\n\t\t// we're only going to resample the initial candidates so let's set that as\n\t\t// the output right away\n\t\t//\n\t\t// We're also 'disabling' temporal accumulation if the renderer's random is frozen otherwise\n\t\t// very strong correlations will creep up, corrupt the render and potentially invalidate\n\t\t// performance measurements (which we're probably trying to measure since we froze the random)\n\t\treturn temporal_neighbor_pixel_index_and_pos;\n\n\tout_temporal_neighbor_reservoir = render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs[temporal_neighbor_pixel_index_and_pos.x];\n\tif (out_temporal_neighbor_reservoir.M == 0)\n\t\t// No temporal neighbor\n\t\treturn temporal_neighbor_pixel_index_and_pos;\n\n\t// Reading from the previous g-buffer or not depending on whether or not the prev g-buffer is available\n\t// (it may not be if we're accumulating because then, it's useless since there is no motion)\n\tout_temporal_neighbor_surface = get_pixel_surface(render_data, temporal_neighbor_pixel_index_and_pos.x, render_data.render_settings.use_prev_frame_g_buffer(), 
random_number_generator);\n\n\tif (out_temporal_neighbor_surface.material.is_emissive())\n\t\t// Can't resample the temporal neighbor if it's emissive\n\t\treturn temporal_neighbor_pixel_index_and_pos;\n\n\treturn temporal_neighbor_pixel_index_and_pos;\n}\n\n/**\n * Counts how many neighbors are eligible for reuse.\n * This is needed for proper normalization by pairwise MIS weights.\n *\n * A neighbor is not eligible if it is outside of the viewport or if\n * it doesn't satisfy the normal/plane/roughness heuristics\n *\n * 'out_valid_neighbor_M_sum' is the sum of the M values (confidences) of the\n * valid neighbors. Used by confidence-weights pairwise MIS weights\n *\n * The bits of 'out_neighbor_heuristics_cache' are 1 or 0 depending on whether or not\n * the corresponding neighbor was valid or not (can be reused later to avoid having to\n * re-evauate the heuristics). Neighbor 0 is LSB.\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE void count_valid_spatiotemporal_neighbors(const HIPRTRenderData& render_data, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t const ReSTIRSurface& center_pixel_surface, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t int center_pixel_index, int2 temporal_neighbor_position, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t int& out_valid_neighbor_count, int& out_valid_neighbor_M_sum, int& out_neighbor_heuristics_cache)\n{\n\tint reused_neighbors_count = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<false>(render_data).reuse_neighbor_count;\n\n\t// The RNG for generating the neighbors. 
It's important to use the same RNG here as the one used in the main for-loop\n\t// of the spatial reuse such that we count the right neighbors\n\tXorshift32Generator spatial_neighbors_rng(render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_neighbors_rng_seed);\n\n\tout_valid_neighbor_count = 0;\n\tfor (int neighbor_index = 0; neighbor_index < reused_neighbors_count; neighbor_index++)\n\t{\n\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<false>(render_data, neighbor_index, temporal_neighbor_position, spatial_neighbors_rng);\n\t\tif (neighbor_pixel_index == -1)\n\t\t\t// Neighbor out of the viewport / invalid\n\t\t\tcontinue;\n\n\t\tif (!check_neighbor_similarity_heuristics<false>(render_data,\n\t\t\tneighbor_pixel_index, center_pixel_index, \n\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<false>(render_data, center_pixel_surface),\n\t\t\trender_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\tcontinue;\n\n\t\tout_valid_neighbor_M_sum += render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs[neighbor_pixel_index].M;\n\t\tout_valid_neighbor_count++;\n\t\tout_neighbor_heuristics_cache |= (1 << neighbor_index);\n\t}\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_DI_SpatiotemporalReuse(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_DI_SpatiotemporalReuse(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n\tconst uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n\tconst uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\tif (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tuint32_t center_pixel_index = (x + y * render_data.render_settings.render_resolution.x);\n\n\tif (!render_data.aux_buffers.pixel_active[center_pixel_index] || 
render_data.g_buffer.first_hit_prim_index[center_pixel_index] == -1)\n\t\t// Pixel inactive because of adaptive sampling, returning\n\t\t// Or also we don't have a primary hit\n\t\treturn;\n\n\t// Initializing the random generator\n\t// TODO try having multiply instead of XOR again\n\tunsigned int seed = render_data.render_settings.freeze_random ? wang_hash(center_pixel_index + 1) : wang_hash(((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1)) ^ render_data.random_number);\n\tXorshift32Generator random_number_generator(seed);\n\n\t// Generating a unique seed per pixel that will be used to generate the spatial neighbors of that pixel if Hammersley isn't used\n\trender_data.render_settings.restir_di_settings.common_spatial_pass.spatial_neighbors_rng_seed = random_number_generator.xorshift32();\n\n\tint2 center_pixel_coords = make_int2(x, y);\n\n\tif (render_data.render_settings.restir_di_settings.common_temporal_pass.temporal_buffer_clear_requested)\n\t\t// We requested a temporal buffer clear for ReSTIR DI\n\t\trender_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs[center_pixel_index] = ReSTIRDIReservoir();\n\n\t// Surface data of the center pixel\n\tReSTIRSurface center_pixel_surface = get_pixel_surface(render_data, center_pixel_index, random_number_generator);\n\n\tReSTIRDIReservoir temporal_neighbor_reservoir;\n\tReSTIRSurface temporal_neighbor_surface;\n\tint3 temporal_neighbor_pixel_index_and_pos = load_spatiotemporal_neighbor_data(render_data, center_pixel_surface, center_pixel_index, temporal_neighbor_reservoir, temporal_neighbor_surface, random_number_generator);\n\tif ((temporal_neighbor_pixel_index_and_pos.x == -1 || temporal_neighbor_reservoir.M <= 1) && render_data.render_settings.restir_di_settings.common_spatial_pass.do_disocclusion_reuse_boost)\n\t\t// Increasing the number of spatial samples for disocclusions\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count = 
render_data.render_settings.restir_di_settings.common_spatial_pass.disocclusion_reuse_count;\n\n\tsetup_adaptive_directional_spatial_reuse<false>(render_data, center_pixel_index, random_number_generator);\n\n\t// 'selected_neighbor' is only used with MIS-like weight\n\t// \n\t// Will keep the index of the neighbor that has been selected by resampling. \n\tint selected_neighbor = 0;\n\tint neighbor_heuristics_cache = 0;\n\tint valid_neighbors_count = 0;\n\tint valid_neighbors_M_sum = 0;\n\tcount_valid_spatiotemporal_neighbors(render_data, center_pixel_surface, center_pixel_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z), valid_neighbors_count, valid_neighbors_M_sum, neighbor_heuristics_cache);\n\tif (temporal_neighbor_pixel_index_and_pos.x != -1 && temporal_neighbor_reservoir.M > 0)\n\t{\n\t\t// Adding the temporal neighbor to the count \n\t\tvalid_neighbors_count++;\n\t\tvalid_neighbors_M_sum += temporal_neighbor_reservoir.M;\n\t}\n\n\tReSTIRDIReservoir spatiotemporal_output_reservoir;\n\tReSTIRDIReservoir initial_candidates_reservoir = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[center_pixel_index];\n\tReSTIRSpatiotemporalResamplingMISWeight<ReSTIR_DI_BiasCorrectionWeights, /* IsReSTIRGI */ false> mis_weight_function;\n\tif (temporal_neighbor_pixel_index_and_pos.x != -1)\n\t{\n\t\t// Resampling the temporal neighbor\n\n\t\tif (temporal_neighbor_reservoir.M > 0)\n\t\t{\n\t\t\tfloat target_function_at_center = 0.0f;\n\t\t\tif (temporal_neighbor_reservoir.UCW > 0.0f)\n\t\t\t\t// Only resampling if the temporal neighbor isn't empty\n\t\t\t\t//\n\t\t\t\t// If the temporal neighbor's reservoir is empty, then we do not get\n\t\t\t\t// inside that if() and the target function stays at 0.0f which eliminates\n\t\t\t\t// most of the computations afterwards\n\t\t\t\t//\n\t\t\t\t// Matching the visibility used here with the bias correction mode for ease \n\t\t\t\t// of use (and because 
manually handling the visibility in the target \n\t\t\t\t// function of the temporal reuse is tricky for the user to use in \n\t\t\t\t// combination with other parameters and on top of that, it makes little \n\t\t\t\t// technical sense since our temporal neighbor is supposed to be unoccluded \n\t\t\t\t// (unless geometry moves around in the scene but that's another problem)\n\t\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, temporal_neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, temporal_neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\t\ttemporal_neighbor_reservoir.UCW,\n\t\t\t\ttemporal_neighbor_reservoir.sample,\n\t\t\t\t\n\t\t\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\t\t\t\tTEMPORAL_NEIGHBOR_ID, initial_candidates_reservoir.M, temporal_neighbor_reservoir.M, \n\t\t\t\tcenter_pixel_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z),\n\t\t\t\trandom_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == 
RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\t\tbool update_mc = initial_candidates_reservoir.M > 0 && initial_candidates_reservoir.UCW > 0.0f;\n\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, \n\t\t\t\t\n\t\t\t\ttemporal_neighbor_reservoir.M, temporal_neighbor_reservoir.sample.target_function,\n\t\t\t\tinitial_candidates_reservoir.M, initial_candidates_reservoir.sample.target_function,\n\t\t\t\tinitial_candidates_reservoir.sample,\n\n\t\t\t\ttarget_function_at_center, temporal_neighbor_pixel_index_and_pos.x, valid_neighbors_count, valid_neighbors_M_sum, \n\t\t\t\tupdate_mc, /* resample canonical */ false, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\t\tbool update_mc = initial_candidates_reservoir.M > 0 && initial_candidates_reservoir.UCW > 0.0f;\n\n\t\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\t\ttemporal_neighbor_reservoir.M, temporal_neighbor_reservoir.sample.target_function,\n\t\t\t\tinitial_candidates_reservoir.M, initial_candidates_reservoir.sample.target_function,\n\n\t\t\t\tinitial_candidates_reservoir.sample, center_pixel_surface,\n\n\t\t\t\ttarget_function_at_center, temporal_neighbor_pixel_index_and_pos.x, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\t\tupdate_mc, /* resample canonical */ false, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t\t// Combining as in Alg. 
6 of the paper\n\t\t\tfloat jacobian_determinant = 1.0f;\n\t\t\tif (spatiotemporal_output_reservoir.combine_with(temporal_neighbor_reservoir, temporal_neighbor_resampling_mis_weight, target_function_at_center, jacobian_determinant, random_number_generator))\n\t\t\t{\n\t\t\t\t// Only used with MIS-like weight\n\t\t\t\tselected_neighbor = TEMPORAL_NEIGHBOR_ID;\n\n\t\t\t\t// Using ReSTIR_DI_BiasCorrectionUseVisibility here because that's what we use in the resampling target function\n#if ReSTIR_DI_BiasCorrectionUseVisibility == KERNEL_OPTION_FALSE\n\t\t\t\t// We cannot be certain that the visibility of the temporal neighbor\n\t\t\t\t// chosen is exactly the same so we're clearing the unoccluded flag\n\t\t\t\tspatiotemporal_output_reservoir.sample.flags &= ~ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#else\n\t\t\t\t// However, if we're using the visibility in the target function, then\n\t\t\t\t// the temporal neighobr could never have been selected unless it is\n\t\t\t\t// unoccluded so we can add the flag\n\t\t\t\tspatiotemporal_output_reservoir.sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#endif\n\t\t\t}\n\t\t}\n\n\t\tspatiotemporal_output_reservoir.sanity_check(center_pixel_coords);\n\t}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\tReSTIRDIReservoir* spatial_input_reservoir_buffer = render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs;\n\tXorshift32Generator spatial_neighbors_rng(render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_neighbors_rng_seed);\n\n\t// Resampling the neighbors. 
Using neighbors + 1 here so that\n\t// we can use the last iteration of the loop to resample the *initial candidates reservoir*\n\tint reused_neighbors_count = render_data.render_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count;\n\tint start_index = 0;\n\tif (valid_neighbors_M_sum == 0)\n\t\t// No spatial resampling to do, only the initial candidate reservoir (potentially)\n\t\t// so we can directly start there\n\t\tstart_index = reused_neighbors_count;\n\tfor (int spatial_neighbor_index = start_index; spatial_neighbor_index < reused_neighbors_count + 1; spatial_neighbor_index++)\n\t{\n\t\t// We can already check whether or not this neighbor is going to be\n\t\t// accepted at all by checking the heuristic cache\n\t\tif (spatial_neighbor_index < reused_neighbors_count && reused_neighbors_count <= 32)\n\t\t\t// If not the center pixel, we can check the heuristics, otherwise there's no need to,\n\t\t\t// we know that the center pixel will be accepted\n\t\t\t// \n\t\t\t// Our heuristics cache is a 32bit int so we can only cache 32 values are we're\n\t\t\t// going to have issues if we try to read more than that.\n\t\t\tif ((neighbor_heuristics_cache & (1 << spatial_neighbor_index)) == 0)\n\t\t\t{\n\t\t\t\t// Advancing the rng for generating the spatial neighbors since if we \"continue\" here, the spatial neighbors rng\n\t\t\t\t// isn't going to be advanced by the call to 'get_spatial_neighbor_pixel_index' below so we're doing it manually\n\t\t\t\tspatial_neighbor_advance_rng<false>(render_data, spatial_neighbors_rng);\n\n\t\t\t\t// Neighbor not passing the heuristics tests, skipping it right away\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\tint neighbor_pixel_index = -1;\n\t\tif (spatial_neighbor_index == reused_neighbors_count)\n\t\t\t// Last iteration, resampling the initial candidates\n\t\t\tneighbor_pixel_index = center_pixel_index;\n\t\telse\n\t\t\t// Resampling around the temporal neighbor location\n\t\t\tneighbor_pixel_index = 
get_spatial_neighbor_pixel_index<false>(render_data,  spatial_neighbor_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z), spatial_neighbors_rng);\n\n\t\tif (neighbor_pixel_index == -1)\n\t\t\t// Neighbor out of the viewport\n\t\t\tcontinue;\n\n\t\tif (spatial_neighbor_index < reused_neighbors_count && reused_neighbors_count > 32)\n\t\t\t// If not the center pixel, we can check the heuristics\n\t\t\t// \n\t\t\t// Only checking the heuristic if we have more than 32 neighbors (does not fit in the heuristic cache)\n\t\t\t// If we have less than 32 neighbors, we've already checked the cache at the beginning of this for loop\n\t\t\tif (!check_neighbor_similarity_heuristics<false>(render_data,\n\t\t\t\tneighbor_pixel_index, center_pixel_index,\n\t\t\t\tcenter_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<false>(render_data, center_pixel_surface),\n\t\t\t\trender_data.render_settings.use_prev_frame_g_buffer()))\n\t\t\t{\n\t\t\t\tcontinue;\n\t\t\t}\n\n\t\t// Neighbor surface needed for roughness m-capping and jacobian determinant\n\t\tReSTIRDIReservoir neighbor_reservoir;\n\t\tif (spatial_neighbor_index == reused_neighbors_count)\n\t\t\t// Last iteration, resampling the initial candidates\n\t\t\tneighbor_reservoir = initial_candidates_reservoir;\n\t\telse\n\t\t\tneighbor_reservoir = spatial_input_reservoir_buffer[neighbor_pixel_index];\n\n\t\tfloat target_function_at_center = 0.0f;\n\t\tbool do_neighbor_target_function_visibility = do_include_spatial_visibility_term_or_not(render_data, spatial_neighbor_index);\n\t\tif (neighbor_reservoir.UCW > 0.0f)\n\t\t{\n\t\t\tif (spatial_neighbor_index == reused_neighbors_count)\n\t\t\t\t// No need to evaluate the center sample at the center pixel, that's exactly\n\t\t\t\t// the target function of the center reservoir\n\t\t\t\ttarget_function_at_center = neighbor_reservoir.sample.target_function;\n\t\t\telse\n\t\t\t\tif 
(do_neighbor_target_function_visibility)\n\t\t\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<KERNEL_OPTION_TRUE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<KERNEL_OPTION_FALSE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t}\n\n\t\tReSTIRSurface neighbor_surface = get_pixel_surface(render_data, neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\t\t// Using 'spatial_neighbor_index + 1' in this function call because the index\n\t\t// 0 is for the temporal neighbor so we start at 1 by using '+ 1'\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, \n\t\t\tneighbor_reservoir.UCW,\n\t\t\tneighbor_reservoir.sample,\n\t\t\t\n\t\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\t\t\tspatial_neighbor_index + 1, initial_candidates_reservoir.M, temporal_neighbor_reservoir.M, \n\t\t\tcenter_pixel_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z),\n\t\t\trandom_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\tbool 
update_mc = initial_candidates_reservoir.M > 0 && initial_candidates_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, \n\t\t\t\t\n\t\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function, \n\t\t\t\tinitial_candidates_reservoir.M, initial_candidates_reservoir.sample.target_function,\n\t\t\t\tinitial_candidates_reservoir.sample,\n\n\t\t\t\ttarget_function_at_center, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\t\tupdate_mc, spatial_neighbor_index == reused_neighbors_count, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\tbool update_mc = initial_candidates_reservoir.M > 0 && initial_candidates_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function,\n\t\t\tinitial_candidates_reservoir.M, initial_candidates_reservoir.sample.target_function,\n\n\t\t\tinitial_candidates_reservoir.sample, center_pixel_surface,\n\n\t\t\ttarget_function_at_center, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\tupdate_mc, spatial_neighbor_index == reused_neighbors_count, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t// Combining as in Alg. 
6 of the paper\n\t\tif (spatiotemporal_output_reservoir.combine_with(neighbor_reservoir, mis_weight, target_function_at_center, 1.0f, random_number_generator))\n\t\t{\n\t\t\t// Only used with MIS-like weight\n\t\t\t// \n\t\t\t// + 1 here because we've already resampled the temporal neighbor so we need to account for that\n\t\t\tselected_neighbor = spatial_neighbor_index + 1;\n\n\t\t\tif (do_neighbor_target_function_visibility)\n\t\t\t\t// If we resampled the neighbor with visibility, then we are sure that we can set the flag\n\t\t\t\tspatiotemporal_output_reservoir.sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// If we didn't resample the neighbor with visibility\n\t\t\t\tif (spatial_neighbor_index == reused_neighbors_count)\n\t\t\t\t\t// If we just resampled the center pixel, then we can copy the visibility flag\n\t\t\t\t\tspatiotemporal_output_reservoir.sample.flags |= neighbor_reservoir.sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\t\telse\n\t\t\t\t\t// This was not the center pixel, we cannot be certain what the visibility at the center\n\t\t\t\t\t// pixel of the neighbor sample we just resample is so we're clearing the bit\n\t\t\t\t\tspatiotemporal_output_reservoir.sample.flags &= ~ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\t}\n\t\t}\n\t\tspatiotemporal_output_reservoir.sanity_check(center_pixel_coords);\n\t}\n\n\tfloat normalization_numerator = 1.0f;\n\tfloat normalization_denominator = 1.0f;\n\n\tReSTIRSpatiotemporalNormalizationWeight<ReSTIR_DI_BiasCorrectionWeights, /* Is ReSTIR GI */ false> normalization_function;\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\tnormalization_function.get_normalization(render_data, \n\t\tspatiotemporal_output_reservoir.weight_sum, initial_candidates_reservoir.M, \n\t\tcenter_pixel_surface, \n\t\ttemporal_neighbor_reservoir.M, center_pixel_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, 
temporal_neighbor_pixel_index_and_pos.z),\n\t\tnormalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\tnormalization_function.get_normalization(render_data, \n\t\tspatiotemporal_output_reservoir.sample, spatiotemporal_output_reservoir.weight_sum, \n\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\t\tinitial_candidates_reservoir.M, temporal_neighbor_reservoir.M, center_pixel_index, \n\t\tmake_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z), \n\t\tnormalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\tnormalization_function.get_normalization(render_data, \n\t\tspatiotemporal_output_reservoir.sample, spatiotemporal_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface, temporal_neighbor_surface, \n\t\tselected_neighbor, initial_candidates_reservoir.M, temporal_neighbor_reservoir.M, center_pixel_index, make_int2(temporal_neighbor_pixel_index_and_pos.y, temporal_neighbor_pixel_index_and_pos.z),\n\t\tnormalization_numerator, normalization_denominator, \n\t\trandom_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#else\n#error \"Unsupported bias correction 
mode\"\n#endif\n\n\tspatiotemporal_output_reservoir.end_with_normalization(normalization_numerator, normalization_denominator);\n\tspatiotemporal_output_reservoir.sanity_check(center_pixel_coords);\n\n\t// Only these 3 weighting schemes are affected\n#if (ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO) \\\n\t&& ReSTIR_DI_BiasCorrectionUseVisibility == KERNEL_OPTION_TRUE \\\n\t&& (ReSTIR_DI_DoVisibilityReuse == KERNEL_OPTION_TRUE || (ReSTIR_DI_InitialTargetFunctionVisibility == KERNEL_OPTION_TRUE && ReSTIR_DI_SpatialTargetFunctionVisibility == KERNEL_OPTION_TRUE))\n\t// Why is this needed?\n\t//\n\t// Picture the case where we have visibility reuse (at the end of the initial candidates sampling pass),\n\t// visibility term in the bias correction target function (when counting the neighbors that could\n\t// have produced the picked sample) and 2 spatial reuse passes.\n\t//\n\t// The first spatial reuse pass reuses from samples that were produced with visibility in mind\n\t// (because of the visibility reuse pass that discards occluded samples). This means that we need\n\t// the visibility in the target function used when counting the neighbors that could have produced\n\t// the picked sample otherwise we may think that our neighbor could have produced the picked\n\t// sample where actually it couldn't because the sample is occluded at the neighbor. 
We would\n\t// then have a Z denominator (with 1/Z weights) that is too large and we'll end up with darkening.\n\t//\n\t// Now at the end of the first spatial reuse pass, the center pixel ends up with a sample that may\n\t// or may not be occluded from the center's pixel point of view. We didn't include the visibility\n\t// in the target function when resampling the neighbors (only when counting the \"correct\" neighbors\n\t// but that's all) so we are not giving a 0 weight to occluded resampled neighbors --> it is possible\n\t// that we picked an occluded sample.\n\t//\n\t// In the second spatial reuse pass, we are now going to resample from our neighbors and get some\n\t// samples that were not generated with occlusion in mind (because the resampling target function of\n\t// the first spatial reuse doesn't include visibility). Yet, we are going to weight them with occlusion\n\t// in mind. This means that we are probably going to discard samples because of occlusion that could\n\t// have been generated because they are generated without occlusion test. We end up discarding too many\n\t// samples --> brightening bias.\n\t//\n\t// With the visibility reuse at the end of each spatial pass, we force samples at the end of each\n\t// spatial reuse to take visibility into account so that when we weight them with visibility testing,\n\t// everything goes well\n\t//\n\t// As an optimization, we also do this for the pairwise MIS because pairwise MIS evaluates the target function\n\t// of reservoirs at their own location. Doing the visibility reuse here ensures that a reservoir sample at its own location\n\t// includes visibility and so we do not need to recompute the target function of the neighbors in this case. 
We can just\n\t// reuse the target function stored in the reservoir\n\t//\n\t// We also give the user the choice to remove bias using this option or not as it introduces very little bias\n\t// in practice (but noticeable when switching back and forth between reference image/biased image)\n\t//\n\t// We only need this if we're going to temporally reuse (because then the output of the spatial reuse must be correct\n\t// for the temporal reuse pass) or if we have multiple spatial reuse passes and this is not the last spatial pass\n\tif (render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass || render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes - 1 != render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_pass_index)\n\t\tReSTIR_DI_visibility_test_kill_reservoir(render_data, spatiotemporal_output_reservoir, center_pixel_surface.shading_point, center_pixel_surface.primitive_index, random_number_generator);\n#endif\n\n\t// M-capping so that we don't have to M-cap when reading reservoirs on the next frame\n\tif (render_data.render_settings.restir_di_settings.m_cap > 0)\n\t\t// M-capping the temporal neighbor if an M-cap has been given\n\t\tspatiotemporal_output_reservoir.M = hippt::min(spatiotemporal_output_reservoir.M, render_data.render_settings.restir_di_settings.m_cap);\n\n\trender_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs[center_pixel_index] = spatiotemporal_output_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DI/InitialCandidates.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RESTIR_DI_INITIAL_CANDIDATES_H\n#define KERNELS_RESTIR_DI_INITIAL_CANDIDATES_H\n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/DI/PresampledLight.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"HostDeviceCommon/KernelOptions/ReSTIRDIOptions.h\"\n\n#define LIGHT_DOESNT_CONTRIBUTE_ENOUGH -42.0f\n\n/**\n * Reference: https://en.wikipedia.org/wiki/Pairing_function\n */\nHIPRT_HOST_DEVICE HIPRT_INLINE int cantor_pairing_function(int x, int y)\n{\n    return (x + y + 1) * (x + y) / 2 + y;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDISample use_presampled_light_candidate(const HIPRTRenderData& render_data, const int2& pixel_coords,\n    const float3& evaluated_point, const float3& shading_normal,\n    ColorRGB32F& out_sample_radiance, float& out_sample_cosine_term, float& out_sample_pdf, float& out_distance_to_light, float3& out_to_light_direction,\n    Xorshift32Generator& random_number_generator)\n{\n    const ReSTIRDILightPresamplingSettings& light_presampling_settings = render_data.render_settings.restir_di_settings.light_presampling;\n\n    // We want all threads in a block of light_presampling_settings.tile_size * light_presampling_settings.tile_size\n    // pixels to sample from the same random subset of lights.\n    // We compute a unique number per each light_presampling_settings.tile_size * light_presampling_settings.tile_size\n    // tile of pixels and use that unique number as seed for our 
random number generator\n    int tile_index_seed = cantor_pairing_function(pixel_coords.x / light_presampling_settings.tile_size, pixel_coords.y / light_presampling_settings.tile_size);\n\n    Xorshift32Generator subset_rng(render_data.random_number * (tile_index_seed + 1));\n    int random_subset_index = subset_rng.random_index(light_presampling_settings.number_of_subsets);\n    int random_light_index_in_subset = random_number_generator.random_index(light_presampling_settings.subset_size);\n    int light_sample_index = random_subset_index * light_presampling_settings.subset_size + random_light_index_in_subset;\n\n    ReSTIRDIPresampledLight presampled_light_sample = light_presampling_settings.light_samples[light_sample_index];\n\n    ReSTIRDISample light_sample;\n    light_sample.emissive_triangle_index = presampled_light_sample.emissive_triangle_index;\n    light_sample.point_on_light_source = presampled_light_sample.point_on_light_source;\n    light_sample.flags = presampled_light_sample.flags;\n\n    out_sample_radiance = presampled_light_sample.radiance;\n    out_sample_pdf = presampled_light_sample.pdf;\n\n    if (light_sample.is_envmap_sample())\n    {\n        out_to_light_direction = matrix_X_vec(render_data.world_settings.envmap_to_world_matrix, light_sample.point_on_light_source);\n        out_distance_to_light = 1.0e35f;\n    }\n    else\n    {\n        out_to_light_direction = light_sample.point_on_light_source - evaluated_point;\n        out_to_light_direction = out_to_light_direction / (out_distance_to_light = hippt::length(out_to_light_direction)); // Normalization\n    }\n\n    out_sample_cosine_term = hippt::dot(shading_normal, out_to_light_direction);\n\n    if (!light_sample.is_envmap_sample())\n    {\n        bool contributes_enough = check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, out_sample_radiance * out_sample_cosine_term / out_sample_pdf);\n        if (!contributes_enough)\n        {\n            // 
Early check that the light contributes enough to the point, and if it doesn't, skip that light sample\n\n            // Setting it to LIGHT_DOESNT_CONTRIBUTE_ENOUGH so that we know that the sample is invalid when the caller of this\n            // function will look at the target function's value\n            light_sample.target_function = LIGHT_DOESNT_CONTRIBUTE_ENOUGH;\n\n            return light_sample;\n        }\n    }\n\n    return light_sample;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDISample sample_fresh_light_candidate(const HIPRTRenderData& render_data, float envmap_candidate_probability, \n    const float3& view_direction, const HitInfo& closest_hit_info,\n    RayPayload& ray_payload,\n    ColorRGB32F& out_sample_radiance, float& out_sample_cosine_term, float& out_sample_pdf, Xorshift32Generator& random_number_generator)\n{\n    ReSTIRDISample light_sample;\n\n    float3 evaluated_point = closest_hit_info.inter_point;\n\n    if (random_number_generator() > envmap_candidate_probability)\n    {\n        // Light sample\n\n        LightSampleInformation light_sample_info = sample_one_emissive_triangle(render_data, \n            closest_hit_info.inter_point, view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, \n            closest_hit_info.primitive_index, ray_payload,\n            random_number_generator);\n\n        light_sample.emissive_triangle_index = light_sample_info.emissive_triangle_index;\n        light_sample.point_on_light_source = light_sample_info.point_on_light;\n        out_sample_pdf = light_sample_info.area_measure_pdf;\n\n        if (out_sample_pdf > 0.0f)\n        {\n            // It can happen that the light PDF returned by the emissive triangle\n            // sampling function is 0 because of emissive triangles that are so\n            // small that we cannot compute their normal and their area (the cross\n            // product of their edges gives a quasi-null vector --> length of 0.0f --> area of 
0)\n\n            float distance_to_light;\n            float3 to_light_direction = light_sample.point_on_light_source - evaluated_point;\n            to_light_direction = to_light_direction / (distance_to_light = hippt::length(to_light_direction)); // Normalization\n\n            out_sample_cosine_term = hippt::max(0.0f, hippt::dot(closest_hit_info.shading_normal, to_light_direction));\n\n            float cosine_at_light_source = compute_cosine_term_at_light_source(light_sample_info.light_source_normal, -to_light_direction);\n            bool contributes_enough = check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, light_sample_info.emission * out_sample_cosine_term / out_sample_pdf);\n            if (!contributes_enough)\n            {\n                // Early check that the light contributes enough to the point, and if it doesn't, skip that light sample\n\n                // Setting it to LIGHT_DOESNT_CONTRIBUTE_ENOUGH so that we know that the sample is invalid when the caller of this\n                // function will look at the target function's value\n                light_sample.target_function = LIGHT_DOESNT_CONTRIBUTE_ENOUGH;\n\n                return light_sample;\n            }\n\n            // Accounting for the probability of sampling a light, not the envmap\n            // (which has probability 'envmap_candidate_probability')\n            out_sample_pdf *= (1.0f - envmap_candidate_probability);\n            out_sample_radiance = light_sample_info.emission;\n        }\n    }\n    else\n    {\n        // Envmap sample\n\n        float3 envmap_sampled_direction;\n        out_sample_radiance = envmap_sample(render_data.world_settings, envmap_sampled_direction, out_sample_pdf, random_number_generator);\n        out_sample_cosine_term = hippt::max(0.0f, hippt::dot(envmap_sampled_direction, closest_hit_info.shading_normal));\n\n        bool contributes_enough = 
check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, out_sample_radiance * out_sample_cosine_term / out_sample_pdf);\n        if (!contributes_enough)\n        {\n            // Early check that the envmap sample contributes enough to the point, and if it doesn't, skip it\n\n            // Setting it to LIGHT_DOESNT_CONTRIBUTE_ENOUGH so that we know that the sample is invalid when the caller of this\n            // function will look at the target function's value\n            light_sample.target_function = LIGHT_DOESNT_CONTRIBUTE_ENOUGH;\n\n            return light_sample;\n\n        }\n\n        // Taking into account the fact that we only have a 1 in 'envmap_candidate_probability' chance to sample\n        // the envmap\n        out_sample_pdf *= envmap_candidate_probability;\n\n        light_sample.emissive_triangle_index = -1;\n        // Storing in envmap space\n        light_sample.point_on_light_source = matrix_X_vec(render_data.world_settings.world_to_envmap_matrix, envmap_sampled_direction);\n        light_sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_ENVMAP_SAMPLE;\n    }\n\n    return light_sample;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void sample_light_candidates(const HIPRTRenderData& render_data, const HitInfo& closest_hit_info, RayPayload& ray_payload, ReSTIRDIReservoir& reservoir, \n    int nb_light_candidates, int nb_bsdf_candidates, float envmap_candidate_probability, \n    const float3& view_direction, Xorshift32Generator& random_number_generator, const int2& pixel_coords)\n{\n    for (int i = 0; i < nb_light_candidates; i++)\n    {\n        ColorRGB32F sample_radiance;\n        float sample_cosine_term = 0.0f;\n        float light_pdf_area_measure = 0.0f;\n\n        float distance_to_light = 0.0f;\n        float3 to_light_direction{ 0.0f, 0.0f, 0.0f };\n#if ReSTIR_DI_DoLightPresampling == KERNEL_OPTION_TRUE\n        ReSTIRDISample light_sample = use_presampled_light_candidate(render_data, pixel_coords, 
\n            closest_hit_info.inter_point, closest_hit_info.shading_normal, \n            sample_radiance, sample_cosine_term, light_pdf_area_measure, distance_to_light, to_light_direction, \n            random_number_generator);\n#else\n        ReSTIRDISample light_sample = sample_fresh_light_candidate(render_data, envmap_candidate_probability, \n            view_direction, closest_hit_info, ray_payload,\n            sample_radiance, sample_cosine_term, light_pdf_area_measure, random_number_generator);\n\n        if (light_sample.is_envmap_sample())\n        {\n            to_light_direction = matrix_X_vec(render_data.world_settings.envmap_to_world_matrix, light_sample.point_on_light_source);\n            distance_to_light = 1.0e35f;\n        }\n        else\n        {\n            to_light_direction = light_sample.point_on_light_source - closest_hit_info.inter_point;\n            to_light_direction = to_light_direction / (distance_to_light = hippt::length(to_light_direction)); // Normalization\n        }\n#endif\n\n        if (light_sample.target_function == LIGHT_DOESNT_CONTRIBUTE_ENOUGH)\n            continue;\n\n        float candidate_weight = 0.0f;\n        if (sample_cosine_term > 0.0f && light_pdf_area_measure > 0.0f)\n        {\n            float bsdf_pdf_solid_angle;\n            BSDFIncidentLightInfo incident_light_info;\n            BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, to_light_direction, incident_light_info, ray_payload.volume_state, false, ray_payload.material, ray_payload.bounce, ray_payload.accumulated_roughness);\n            ColorRGB32F bsdf_color = bsdf_dispatcher_eval(render_data, bsdf_context, bsdf_pdf_solid_angle, random_number_generator);\n\n            // Filling a surface to give to 'ReSTIR_DI_evaluate_target_function'\n            ReSTIRSurface surface;\n            surface.geometric_normal = closest_hit_info.geometric_normal;\n            surface.primitive_index = 
closest_hit_info.primitive_index;\n            surface.material = ray_payload.material;\n            surface.ray_volume_state = ray_payload.volume_state;\n            surface.shading_normal = closest_hit_info.shading_normal;\n            surface.shading_point = closest_hit_info.inter_point;\n            surface.view_direction = view_direction;\n\n            float target_function = ReSTIR_DI_evaluate_target_function<false>(render_data, light_sample, surface, random_number_generator);\n            if (bsdf_pdf_solid_angle <= 0.0f || !check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, target_function / light_pdf_area_measure / bsdf_pdf_solid_angle))\n                target_function = 0.0f;\n            else\n            {\n                float light_pdf_solid_angle;\n                if (light_sample.is_envmap_sample()) \n                    // For envmap sample, the PDF is already in solid angle\n                    light_pdf_solid_angle = light_pdf_area_measure;\n                else\n                {\n                    float3 light_normal = get_triangle_normal_not_normalized(render_data, light_sample.emissive_triangle_index);\n                    float normal_length = hippt::length(light_normal);\n                    float light_area = normal_length * 0.5f;\n                    light_normal /= normal_length;\n\n                    // Converting from area measure to solid angle measure so that we use the balance heuristic we the same measure PDFs\n                    // (same measure for the BSDF PDF and the light PDF)\n                    //\n                    // Removing the envmap proba to avoid double counting it below in\n                    light_pdf_solid_angle = area_to_solid_angle_pdf(light_pdf_area_measure / (1.0f - envmap_candidate_probability), distance_to_light, compute_cosine_term_at_light_source(light_normal, -to_light_direction));\n                    light_pdf_solid_angle *= (1.0f - 
envmap_candidate_probability);\n                }\n\n                float mis_weight = balance_heuristic(light_pdf_solid_angle, nb_light_candidates, bsdf_pdf_solid_angle, nb_bsdf_candidates);\n                candidate_weight = mis_weight * target_function / light_pdf_area_measure;\n                sanity_check<true>(render_data, ColorRGB32F(candidate_weight), 0, 0);\n\n                light_sample.target_function = target_function;\n            }\n        }\n\n#if ReSTIR_DI_InitialTargetFunctionVisibility == KERNEL_OPTION_TRUE\n        if (!render_data.render_settings.do_render_low_resolution() && light_sample.target_function > 0.0f)\n        {\n            // Only doing visiblity if we're render at low resolution\n            // (meaning we're moving the camera) for better movement framerates\n            // Also, only testing visibility if we got a valid sample\n\n            hiprtRay shadow_ray;\n            shadow_ray.origin = closest_hit_info.inter_point;\n            shadow_ray.direction = to_light_direction;\n\n            bool visible = !evaluate_shadow_ray_occluded(render_data, shadow_ray, distance_to_light, closest_hit_info.primitive_index, /* bounce. 
Always 0 for ReSTIR DI*/ 0, random_number_generator);\n            if (!visible)\n            {\n                // Sample occluded, it is not going to be resampled anyways because it is\n                // going to have a 0 contribution so we just take it into account in the\n                // reservoir (because even if it has zero-contribution, this is still a resampled sample)\n                reservoir.M++;\n\n                // And we go onto the next sample\n                continue;\n            }\n\n            // We are now sure that if the sample survived, it is unoccluded\n            light_sample.flags |= RESTIR_DI_FLAGS_UNOCCLUDED;\n        }\n#endif\n\n        reservoir.add_one_candidate(light_sample, candidate_weight, random_number_generator);\n        reservoir.sanity_check(make_int2(-1, -1));\n    }\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE void sample_bsdf_candidates(const HIPRTRenderData& render_data, const HitInfo& closest_hit_info, RayPayload& ray_payload, ReSTIRDIReservoir& reservoir, int nb_light_candidates, int nb_bsdf_candidates, float envmap_candidate_probability, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    // Sampling the BSDF candidates\n    for (int i = 0; i < nb_bsdf_candidates; i++)\n    {\n        float bsdf_sample_pdf_solid_angle = 0.0f;\n        float3 bsdf_sampled_direction;\n\n        BSDFIncidentLightInfo sampled_lobe_info;\n        BSDFContext bsdf_context(view_direction, closest_hit_info.shading_normal, closest_hit_info.geometric_normal, make_float3(0.0f, 0.0f, 0.0f), sampled_lobe_info, ray_payload.volume_state, false, ray_payload.material, /* bounce */ 0, ray_payload.accumulated_roughness);\n        ColorRGB32F bsdf_color = bsdf_dispatcher_sample(render_data, bsdf_context, bsdf_sampled_direction, bsdf_sample_pdf_solid_angle, random_number_generator);\n\n        if (bsdf_sample_pdf_solid_angle > 0.0f)\n        {\n            hiprtRay bsdf_ray;\n            bsdf_ray.origin = 
closest_hit_info.inter_point;\n            bsdf_ray.direction = bsdf_sampled_direction;\n\n            BSDFLightSampleRayHitInfo shadow_light_ray_hit_info;\n            bool hit_found = evaluate_bsdf_light_sample_ray(render_data, bsdf_ray, 1.0e35f, shadow_light_ray_hit_info, closest_hit_info.primitive_index, /* bounce. Always 0 for ReSTIR */ 0, random_number_generator);\n            if (hit_found && !shadow_light_ray_hit_info.hit_emission.is_black())\n            {\n                // If we intersected an emissive material, compute the weight. \n                // Otherwise, the weight is 0 because of the emision being 0 so we just don't compute it\n\n                // Filling a surface to give to 'ReSTIR_DI_evaluate_target_function'\n                ReSTIRSurface surface;\n                surface.geometric_normal = closest_hit_info.geometric_normal;\n                surface.primitive_index = closest_hit_info.primitive_index;\n                surface.material = ray_payload.material;\n                surface.ray_volume_state = ray_payload.volume_state;\n                surface.shading_normal = closest_hit_info.shading_normal;\n                surface.shading_point = closest_hit_info.inter_point;\n                surface.view_direction = view_direction;\n\n                ReSTIRDISample bsdf_RIS_sample;\n                bsdf_RIS_sample.emissive_triangle_index = shadow_light_ray_hit_info.hit_prim_index;\n                bsdf_RIS_sample.point_on_light_source = bsdf_ray.origin + bsdf_ray.direction * shadow_light_ray_hit_info.hit_distance;\n                bsdf_RIS_sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n                bsdf_RIS_sample.flags |= ReSTIRDISample::flags_from_BSDF_incident_light_info(sampled_lobe_info);\n                bsdf_RIS_sample.target_function = ReSTIR_DI_evaluate_target_function<false>(render_data, bsdf_RIS_sample, surface, random_number_generator);\n\n                float light_pdf_solid_angle = 0.0f;\n                
bool refraction_sampled = hippt::dot(bsdf_sampled_direction, closest_hit_info.shading_normal) < 0.0f;\n                if (!refraction_sampled)\n                {\n                    // TODO we should just allow refraction light samples instead of this\n\n                    // Only computing the light PDF if we're not refracting\n                    // \n                    // Why?\n                    // \n                    // Because right now, we allow sampling BSDF refractions. This means that we can sample a light\n                    // that is inside an object with a *BSDF sample*. However, a *light sample* to the same light cannot\n                    // be sampled because there's is going to be the surface of the object we're currently on in-between.\n                    // Basically, we are not allowing light sample refractions and so they should have a MIS weight of 0 which\n                    // is what we're doing here: the pdf of a *light sample* that refracts through a surface is 0.\n                    //\n                    // If not doing that, we're going to have bad MIS weights that don't sum up to 1\n                    // (because the BSDF sample, that should have weight 1 [or to be precise: 1 / nb_bsdf_samples]\n                    // will have weight 1 / (1 + nb_light_samples) [or to be precise: 1 / (nb_bsdf_samples + nb_light_samples)]\n                    // and this is going to cause darkening as the number of light samples grows)\n\n#if ReSTIR_DI_DoLightPresampling == KERNEL_OPTION_TRUE\n                    light_pdf_solid_angle = pdf_of_emissive_triangle_hit_solid_angle<ReSTIR_DI_LightPresamplingStrategy>(render_data, shadow_light_ray_hit_info, bsdf_sampled_direction);\n#else\n                    light_pdf_solid_angle = pdf_of_emissive_triangle_hit_solid_angle(render_data, shadow_light_ray_hit_info, bsdf_sampled_direction);\n#endif\n                }\n\n                float target_function = bsdf_RIS_sample.target_function;\n     
           if (bsdf_sample_pdf_solid_angle <= 0.0f || !check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, target_function / light_pdf_solid_angle / bsdf_sample_pdf_solid_angle))\n                    continue;\n\n                // Our light sampler is only chosen with probability '1.0f - envmap_candidate_probability'\n                // so we multiply that here to take that into account\n                light_pdf_solid_angle *= (1.0f - envmap_candidate_probability);\n\n                float mis_weight = balance_heuristic(bsdf_sample_pdf_solid_angle, nb_bsdf_candidates, light_pdf_solid_angle, nb_light_candidates);\n\n                float bsdf_sample_pdf_area_measure = bsdf_sample_pdf_solid_angle;\n                bsdf_sample_pdf_area_measure /= (shadow_light_ray_hit_info.hit_distance * shadow_light_ray_hit_info.hit_distance);\n                bsdf_sample_pdf_area_measure *= compute_cosine_term_at_light_source(shadow_light_ray_hit_info.hit_geometric_normal, -bsdf_sampled_direction);\n\n                float candidate_weight = mis_weight * target_function / bsdf_sample_pdf_area_measure;\n\n                reservoir.add_one_candidate(bsdf_RIS_sample, candidate_weight, random_number_generator);\n                reservoir.sanity_check(make_int2(-1, -1));\n            }\n            else if (!hit_found && render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP)\n            {\n                // Envmap hit, this becomes an envmap sample\n\n                // Not allowing refraction envmap samples here\n                // TODO fixthis, we should allow them\n                if (hippt::dot(closest_hit_info.shading_normal, bsdf_sampled_direction) > 0.0f)\n                {\n                    float envmap_pdf;\n                    ColorRGB32F envmap_radiance = envmap_eval(render_data, bsdf_sampled_direction, envmap_pdf);\n\n                    // Filling a surface to give to 'ReSTIR_DI_evaluate_target_function'\n       
             ReSTIRSurface surface;\n                    surface.geometric_normal = closest_hit_info.geometric_normal;\n                    surface.primitive_index = closest_hit_info.primitive_index;\n                    surface.material = ray_payload.material;\n                    surface.ray_volume_state = ray_payload.volume_state;\n                    surface.shading_normal = closest_hit_info.shading_normal;\n                    surface.shading_point = closest_hit_info.inter_point;\n                    surface.view_direction = view_direction;\n\n                    ReSTIRDISample bsdf_RIS_sample;\n                    bsdf_RIS_sample.emissive_triangle_index = -1;\n                    // Storing in envmap space\n                    bsdf_RIS_sample.point_on_light_source = matrix_X_vec(render_data.world_settings.world_to_envmap_matrix, bsdf_sampled_direction);\n                    bsdf_RIS_sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n                    bsdf_RIS_sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_ENVMAP_SAMPLE;\n                    bsdf_RIS_sample.flags |= ReSTIRDISample::flags_from_BSDF_incident_light_info(sampled_lobe_info);\n                    bsdf_RIS_sample.target_function = ReSTIR_DI_evaluate_target_function<false>(render_data, bsdf_RIS_sample, surface, random_number_generator);\n\n                    float target_function = bsdf_RIS_sample.target_function;\n\n                    // Not taking the light sampling PDF into account in the balance heuristic because a envmap hit\n                    // (not a light surface hit) can never be sampled by a light-surface sampler and so the PDF\n                    // of the current envmap sample is always 0 for a light sampler.\n                    if (bsdf_sample_pdf_solid_angle <= 0.0f || !check_minimum_light_contribution(render_data.render_settings.minimum_light_contribution, target_function / bsdf_sample_pdf_solid_angle))\n                        continue;\n\n                 
   // We're evaluating the probability of choosing that BSDF-sample direction with the envmap sampler.\n                    // Because our envmap sampler is chosen only with probability 'envmap_candidate_probability', we multiply\n                    // that here to account for that\n                    envmap_pdf *= envmap_candidate_probability;\n                    float mis_weight = balance_heuristic(bsdf_sample_pdf_solid_angle, nb_bsdf_candidates, envmap_pdf, nb_light_candidates);\n                    float candidate_weight = mis_weight * target_function / bsdf_sample_pdf_solid_angle;\n\n                    reservoir.add_one_candidate(bsdf_RIS_sample, candidate_weight, random_number_generator);\n                    reservoir.sanity_check(make_int2(-1, -1));\n                }\n            }\n        }\n    }\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDIReservoir sample_initial_candidates(const HIPRTRenderData& render_data, const int2& pixel_coords, RayPayload& ray_payload, const HitInfo closest_hit_info, const float3& view_direction, Xorshift32Generator& random_number_generator)\n{\n    // If we're rendering at low resolution, only doing 1 candidate of each\n    // for better interactive framerates\n    int initial_nb_light_cand = render_data.render_settings.restir_di_settings.initial_candidates.number_of_initial_light_candidates;\n    int initial_nb_bsdf_cand = render_data.render_settings.restir_di_settings.initial_candidates.number_of_initial_bsdf_candidates;\n\n    int nb_light_candidates = render_data.render_settings.do_render_low_resolution() ? hippt::min(1, initial_nb_light_cand) : initial_nb_light_cand;\n    int nb_bsdf_candidates = render_data.render_settings.do_render_low_resolution() ? 
hippt::min(1, initial_nb_bsdf_cand) : initial_nb_bsdf_cand;\n    float envmap_candidate_probability = 0.0f;\n    if (render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP)\n    {\n        if (render_data.buffers.emissive_triangles_count == 0)\n            // Only the envmap to sample\n            envmap_candidate_probability = 1.0f;\n        else\n            envmap_candidate_probability = render_data.render_settings.restir_di_settings.initial_candidates.envmap_candidate_probability;\n    }\n\n    // Sampling candidates with weighted reservoir sampling\n    ReSTIRDIReservoir reservoir;\n    \n    sample_light_candidates(render_data, closest_hit_info, ray_payload, reservoir, nb_light_candidates, nb_bsdf_candidates, envmap_candidate_probability, view_direction, random_number_generator, pixel_coords);\n    sample_bsdf_candidates(render_data, closest_hit_info, ray_payload, reservoir, nb_light_candidates, nb_bsdf_candidates, envmap_candidate_probability, view_direction, random_number_generator);\n\n    reservoir.end();\n    reservoir.sanity_check(pixel_coords);\n    // There's no need to keep M > 1 here, if you have 4 light candidates and 1 BSDF candidates, that's 5 samples.\n    // But if you divide everyone by 5, everything stays correct. 
That allows manipulating the M-cap without having\n    // to take the number of initial candidates into account\n    reservoir.M = 1;\n\n    return reservoir;\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_DI_InitialCandidates(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_DI_InitialCandidates(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n    if (render_data.buffers.emissive_triangles_count == 0 && render_data.world_settings.ambient_light_type != AmbientLightType::ENVMAP)\n        // No initial candidates to sample since no lights\n        return;\n\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = (x + y * render_data.render_settings.render_resolution.x);\n    DevicePackedEffectiveMaterial material = render_data.g_buffer.materials[pixel_index];\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n    Xorshift32Generator random_number_generator(seed);\n\n    if (!render_data.aux_buffers.pixel_active[pixel_index] || render_data.g_buffer.first_hit_prim_index[pixel_index] == -1)\n        // Pixel inactive because of adaptive sampling, returning\n        // Or also we don't have a primary hit\n        return;\n\n    HitInfo hit_info;\n    hit_info.geometric_normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n    hit_info.shading_normal = render_data.g_buffer.shading_normals[pixel_index].unpack();\n    hit_info.inter_point = render_data.g_buffer.primary_hit_position[pixel_index];\n    
hit_info.primitive_index = render_data.g_buffer.first_hit_prim_index[pixel_index];\n\n    RayPayload ray_payload;\n    ray_payload.material = material.unpack();\n    // Because this is the camera hit (and assuming the camera isn't inside volumes for now),\n    // the ray volume state after the camera hit is just an empty interior stack but with\n    // the material index that we hit pushed onto the stack. That's it. Because it is that\n    // simple, we don't have the ray volume state in the GBuffer but rather we can\n    // reconstruct the ray volume state on the fly\n    ray_payload.volume_state.reconstruct_first_hit(\n        ray_payload.material,\n        render_data.buffers.material_indices,\n        render_data.g_buffer.first_hit_prim_index[pixel_index],\n        random_number_generator);\n\n    float3 view_direction = render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n    // Producing and storing the reservoir\n    ReSTIRDIReservoir initial_candidates_reservoir = sample_initial_candidates(render_data, make_int2(x, y), ray_payload, hit_info, view_direction, random_number_generator);\n\n#if ReSTIR_DI_DoVisibilityReuse == KERNEL_OPTION_TRUE\n    ReSTIR_DI_visibility_test_kill_reservoir(render_data, initial_candidates_reservoir, hit_info.inter_point, hit_info.primitive_index, random_number_generator);\n#endif\n\n    render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[pixel_index] = initial_candidates_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DI/LightsPresampling.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RESTIR_DI_LIGHTS_PRESAMPLING_H\n#define KERNELS_RESTIR_DI_LIGHTS_PRESAMPLING_H\n\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/kernel_parameters/ReSTIR/DI/LightPresamplingParameters.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n *\n * [1] [Rearchitecting Spatiotemporal Resampling for Production] https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production\n */\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDIPresampledLight presample_envmap(const WorldSettings& world_settings, float envmap_sampling_probability, Xorshift32Generator& random_number_generator)\n{\n    ReSTIRDIPresampledLight presampled_envmap;\n    presampled_envmap.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_ENVMAP_SAMPLE;\n\n    ColorRGB32F radiance = envmap_sample(world_settings, presampled_envmap.point_on_light_source, presampled_envmap.pdf, random_number_generator);\n\n    // Moving the direction to envmap space because that's what we use for ReSTIR DI\n    presampled_envmap.point_on_light_source = matrix_X_vec(world_settings.world_to_envmap_matrix, presampled_envmap.point_on_light_source);\n    presampled_envmap.radiance = radiance;\n    presampled_envmap.pdf *= envmap_sampling_probability;\n\n    return presampled_envmap;\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDIPresampledLight presample_emissive_triangle(const HIPRTRenderData& render_data, float light_sampling_probability, Xorshift32Generator& random_number_generator)\n{\n    ReSTIRDIPresampledLight presampled_light;\n\n    // We're passing 0.0f as the position here because we do not have a position when presampling lights in screen space\n    // The position 
is only used for \"spatial sampling\" schemes such as ReGIR or light trees for example and such schemes are not\n    // compatible with ReSTIR light presampling anyways\n    LightSampleInformation light_sample = sample_one_emissive_triangle<ReSTIR_DI_LightPresamplingStrategy>(render_data, random_number_generator);\n\n    if (light_sample.area_measure_pdf > 0.0f)\n    {\n        presampled_light.point_on_light_source = light_sample.point_on_light;\n        presampled_light.light_source_normal = light_sample.light_source_normal;\n        presampled_light.emissive_triangle_index = light_sample.emissive_triangle_index;\n\n        // PDF in area measure\n        presampled_light.pdf = light_sample.area_measure_pdf;\n        presampled_light.pdf *= light_sampling_probability;\n        presampled_light.radiance = light_sample.emission;\n    }\n\n    return presampled_light;\n}\n\n// TODO try just passing LightPresamplingParameters in there instead of everything individually\nHIPRT_HOST_DEVICE HIPRT_INLINE ReSTIRDIPresampledLight ReSTIR_DI_presample_one_light(const HIPRTRenderData& render_data, const LightPresamplingParameters& parameters, float envmap_sampling_probability, Xorshift32Generator& random_number_generator)\n{\n    ReSTIRDIPresampledLight presampled_light;\n    if (random_number_generator() < envmap_sampling_probability)\n        presampled_light = presample_envmap(render_data.world_settings, envmap_sampling_probability, random_number_generator);\n    else\n        presampled_light = presample_emissive_triangle(render_data, 1.0f - envmap_sampling_probability, random_number_generator);\n\n    return presampled_light;\n}\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_DI_LightsPresampling(LightPresamplingParameters presampling_parameters, HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_DI_LightsPresampling(LightPresamplingParameters presampling_parameters, HIPRTRenderData render_data, int 
x)\n#endif\n{\n    if (render_data.buffers.emissive_triangles_count == 0 && render_data.world_settings.ambient_light_type != AmbientLightType::ENVMAP)\n        // No initial candidates to sample since no lights\n        return;\n\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n    if (x >= presampling_parameters.subset_size * presampling_parameters.number_of_subsets)\n        return;\n\n    uint32_t thread_index = x;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(thread_index + 1);\n    else\n        seed = wang_hash((thread_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n    Xorshift32Generator random_number_generator(seed);\n\n    float envmap_candidate_probability = 0.0f;\n    if (render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP)\n    {\n        if (render_data.buffers.emissive_triangles_count == 0)\n            // Only the envmap to sample\n            envmap_candidate_probability = 1.0f;\n        else\n            envmap_candidate_probability = presampling_parameters.envmap_sampling_probability;\n    }\n\n    presampling_parameters.out_light_samples[x] = ReSTIR_DI_presample_one_light(render_data, presampling_parameters, envmap_candidate_probability, random_number_generator);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DI/SpatialReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_SPATIAL_REUSE_H\n#define DEVICE_RESTIR_DI_SPATIAL_REUSE_H \n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/Jacobian.h\"\n#include \"Device/includes/ReSTIR/NeighborSimilarity.h\"\n#include \"Device/includes/ReSTIR/OptimalVisibilitySampling.h\"\n#include \"Device/includes/ReSTIR/SpatialMISWeight.h\"\n#include \"Device/includes/ReSTIR/SpatialNormalizationWeight.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/Sampling.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n *\n * [1] [Spatiotemporal reservoir resampling for real-time ray tracing with dynamic direct lighting] https://research.nvidia.com/labs/rtr/publication/bitterli2020spatiotemporal/\n * [2] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n * [3] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time - SIGGRAPH 2023 Presentation Video] https://dl.acm.org/doi/10.1145/3587423.3595511#sec-supp\n * [4] [NVIDIA RTX DI SDK - Github] https://github.com/NVIDIAGameWorks/RTXDI\n * [5] [Generalized Resampled Importance Sampling Foundations of ReSTIR] https://research.nvidia.com/publication/2022-07_generalized-resampled-importance-sampling-foundations-restir\n * [6] [Uniform disk sampling] https://rh8liuqy.github.io/Uniform_Disk.html\n * [7] [Reddit Post for the 
Jacobian term needed] https://www.reddit.com/r/GraphicsProgramming/comments/1eo5hqr/restir_di_light_sample_pdf_confusion/\n * [8] [Rearchitecting Spatiotemporal Resampling for Production] https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production\n */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_DI_SpatialReuse(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_DI_SpatialReuse(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n\tconst uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n\tconst uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\tif (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tuint32_t center_pixel_index = (x + y * render_data.render_settings.render_resolution.x);\n\n\tif (!render_data.aux_buffers.pixel_active[center_pixel_index] || render_data.g_buffer.first_hit_prim_index[center_pixel_index] == -1)\n\t\t// Pixel inactive because of adaptive sampling, returning\n\t\t// Or also we don't have a primary hit\n\t\treturn;\n\n\t// Initializing the random generator\n\tunsigned int seed;\n\tif (render_data.render_settings.freeze_random)\n\t\tseed = wang_hash(center_pixel_index + 1);\n\telse if (render_data.render_settings.restir_gi_settings.common_spatial_pass.coalesced_spatial_reuse)\n\t\tseed = wang_hash((render_data.render_settings.sample_number + 1) * render_data.random_number);\n\telse\n\t\tseed = wang_hash(((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1)) * render_data.random_number);\n\tXorshift32Generator random_number_generator(seed);\n\n\tReSTIRDIReservoir* input_reservoir_buffer = render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs;\n\tReSTIRDIReservoir spatial_reuse_output_reservoir;\n\n\tint2 center_pixel_coords = make_int2(x, y);\n\n\tReSTIRDIReservoir 
center_pixel_reservoir = input_reservoir_buffer[center_pixel_index];\n\tif ((center_pixel_reservoir.M <= 1) && render_data.render_settings.restir_di_settings.common_spatial_pass.do_disocclusion_reuse_boost)\n\t\t// Increasing the number of spatial samples for disocclusions\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count = render_data.render_settings.restir_di_settings.common_spatial_pass.disocclusion_reuse_count;\n\n\tReSTIRSurface center_pixel_surface = get_pixel_surface(render_data, center_pixel_index, random_number_generator);\n\n\tsetup_adaptive_directional_spatial_reuse<false>(render_data, center_pixel_index, random_number_generator);\n\n\t// Only used with MIS-like weight\n\tint selected_neighbor = 0;\n\tint neighbor_heuristics_cache = 0;\n\tint valid_neighbors_count = 0;\n\tint valid_neighbors_M_sum = 0;\n\tcount_valid_spatial_neighbors<false>(render_data, center_pixel_surface, center_pixel_coords, valid_neighbors_count, valid_neighbors_M_sum, neighbor_heuristics_cache);\n\n\n\tReSTIRSpatialResamplingMISWeight<ReSTIR_DI_BiasCorrectionWeights, /* IsReSTIRGI */ false> mis_weight_function;\n\tXorshift32Generator spatial_neighbors_rng(render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_neighbors_rng_seed);\n\n\t// Resampling the neighbors. 
Using neighbors + 1 here so that\n\t// we can use the last iteration of the loop to resample ourselves (the center pixel)\n\t// \n\t// See the implementation of get_spatial_neighbor_pixel_index() in ReSTIR/DI/Utils.h\n\tint reused_neighbors_count = render_data.render_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count;\n\tint start_index = 0;\n\tif (valid_neighbors_M_sum == 0)\n\t\t// No valid neighbor to resample from, skip to the initial candidate right away\n\t\tstart_index = reused_neighbors_count;\n\tfor (int neighbor_index = start_index; neighbor_index < reused_neighbors_count + 1; neighbor_index++)\n\t{\n\t\t// We can already check whether or not this neighbor is going to be\n\t\t// accepted at all by checking the heuristic cache\n\t\tif (neighbor_index < reused_neighbors_count && reused_neighbors_count <= 32)\n\t\t{\n\t\t\t// If not the center pixel, we can check the heuristics, otherwise there's no need to,\n\t\t\t// we know that the center pixel will be accepted\n\t\t\t// \n\t\t\t// Our heuristics cache is a 32bit int so we can only cache 32 values and we're\n\t\t\t// going to have issues if we try to read more than that.\n\t\t\tif ((neighbor_heuristics_cache & (1 << neighbor_index)) == 0)\n\t\t\t{\n\t\t\t\t// Advancing the rng for generating the spatial neighbors since if we \"continue\" here, the spatial neighbors rng\n\t\t\t\t// isn't going to be advanced by the call to 'get_spatial_neighbor_pixel_index' below so we're doing it manually\n\t\t\t\tspatial_neighbor_advance_rng<false>(render_data, spatial_neighbors_rng);\n\n\t\t\t\t// Neighbor not passing the heuristics tests, skipping it right away\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<false>(render_data, neighbor_index, center_pixel_coords, spatial_neighbors_rng);\n\t\tif (neighbor_pixel_index == -1)\n\t\t\t// Neighbor out of the viewport\n\t\t\tcontinue;\n\n\t\tif (neighbor_index < reused_neighbors_count && 
reused_neighbors_count > 32)\n\t\t\t// If not the center pixel, we can check the heuristics\n\t\t\t// \n\t\t\t// Only checking the heuristic if we have more than 32 neighbors (does not fit in the heuristic cache)\n\t\t\t// If we have less than 32 neighbors, we've already checked the cache at the beginning of this for loop\n\t\t\tif (!check_neighbor_similarity_heuristics<false>(render_data,\n\t\t\t\tneighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<false>(render_data, center_pixel_surface)))\n\t\t\t\tcontinue;\n\n\t\tReSTIRDIReservoir neighbor_reservoir = input_reservoir_buffer[neighbor_pixel_index];\n\t\tfloat target_function_at_center = 0.0f;\n\n\t\tbool do_neighbor_target_function_visibility = do_include_visibility_term_or_not<false>(render_data, neighbor_index);\n\t\tif (neighbor_reservoir.UCW > 0.0f)\n\t\t{\n\t\t\tif (neighbor_index == reused_neighbors_count)\n\t\t\t\t// No need to evaluate the center sample at the center pixel, that's exactly\n\t\t\t\t// the target function of the center reservoir\n\t\t\t\ttarget_function_at_center = neighbor_reservoir.sample.target_function;\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (do_neighbor_target_function_visibility)\n\t\t\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<KERNEL_OPTION_TRUE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<KERNEL_OPTION_FALSE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t\t}\n\t\t}\n\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif 
ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, neighbor_reservoir.M);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.UCW,\n\t\t\tneighbor_reservoir.sample,\n\n\t\t\tcenter_pixel_surface, neighbor_index, center_pixel_coords, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\tbool update_mc = center_pixel_reservoir.M > 0 && center_pixel_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function,\n\t\t\tcenter_pixel_reservoir.sample, center_pixel_reservoir.M, center_pixel_reservoir.sample.target_function,\n\t\t\tneighbor_reservoir,\n\n\t\t\tcenter_pixel_surface, target_function_at_center, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\tupdate_mc,/* resampling canonical */ neighbor_index == reused_neighbors_count, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\tbool update_mc = center_pixel_reservoir.M > 0 && center_pixel_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function,\n\t\t\tcenter_pixel_reservoir.sample, center_pixel_reservoir.M, center_pixel_reservoir.sample.target_function,\n\t\t\tneighbor_reservoir,\n\n\t\t\tcenter_pixel_surface, target_function_at_center, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\tupdate_mc,/* resampling 
canonical */ neighbor_index == reused_neighbors_count, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t// Combining as in Alg. 6 of the paper\n\t\tfloat jacobian_determinant = 1.0f;\n\t\tif (spatial_reuse_output_reservoir.combine_with(neighbor_reservoir, mis_weight, target_function_at_center, jacobian_determinant, random_number_generator))\n\t\t{\n\t\t\t// Only used with MIS-like weight\n\t\t\tselected_neighbor = neighbor_index;\n\n\t\t\tif (do_neighbor_target_function_visibility)\n\t\t\t\t// If we resampled the neighbor with visibility, then we are sure that we can set the flag\n\t\t\t\tspatial_reuse_output_reservoir.sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// If we didn't resample the neighbor with visibility\n\t\t\t\tif (neighbor_index == reused_neighbors_count)\n\t\t\t\t\t// If we just resampled the center pixel, then we can copy the visibility flag\n\t\t\t\t\tspatial_reuse_output_reservoir.sample.flags |= neighbor_reservoir.sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\t\telse\n\t\t\t\t\t// This was not the center pixel, we cannot be certain what the visibility at the center\n\t\t\t\t\t// pixel of the neighbor sample we just resample is so we're clearing the bit\n\t\t\t\t\tspatial_reuse_output_reservoir.sample.flags &= ~ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n\t\t\t}\n\t\t}\n\n\t\tspatial_reuse_output_reservoir.sanity_check(center_pixel_coords);\n\n\t\tReSTIR_optimal_visibility_sampling<false>(render_data,\n\t\t\tspatial_reuse_output_reservoir, center_pixel_reservoir,\n\t\t\tcenter_pixel_surface,\n\t\t\tneighbor_index, reused_neighbors_count,\n\t\t\trandom_number_generator);\n\t}\n\n\tfloat normalization_numerator = 1.0f;\n\tfloat normalization_denominator = 1.0f;\n\n\tReSTIRSpatialNormalizationWeight<ReSTIR_DI_BiasCorrectionWeights, /* Is ReSTIR GI */ false> normalization_function;\n#if ReSTIR_DI_BiasCorrectionWeights == 
RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface, center_pixel_coords, normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.sample, spatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface,\n\t\tcenter_pixel_coords, normalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.sample, spatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface, selected_neighbor,\n\t\tcenter_pixel_coords, normalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\tspatial_reuse_output_reservoir.end_with_normalization(normalization_numerator, normalization_denominator);\n\tspatial_reuse_output_reservoir.sanity_check(center_pixel_coords);\n\n\t// Only these 3 weighting schemes are affected\n#if (ReSTIR_DI_BiasCorrectionWeights == 
RESTIR_DI_BIAS_CORRECTION_1_OVER_Z \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO \\\n\t|| ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO) \\\n\t&& ReSTIR_DI_BiasCorrectionUseVisibility == KERNEL_OPTION_TRUE \\\n\t&& (ReSTIR_DI_DoVisibilityReuse == KERNEL_OPTION_TRUE || (ReSTIR_DI_InitialTargetFunctionVisibility == KERNEL_OPTION_TRUE && ReSTIR_DI_SpatialTargetFunctionVisibility == KERNEL_OPTION_TRUE))\n\t// Why is this needed?\n\t//\n\t// Picture the case where we have visibility reuse (at the end of the initial candidates sampling pass),\n\t// visibility term in the bias correction target function (when counting the neighbors that could\n\t// have produced the picked sample) and 2 spatial reuse passes.\n\t//\n\t// The first spatial reuse pass reuses from samples that were produced with visibility in mind\n\t// (because of the visibility reuse pass that discards occluded samples). This means that we need\n\t// the visibility in the target function used when counting the neighbors that could have produced\n\t// the picked sample otherwise we may think that our neighbor could have produced the picked\n\t// sample where actually it couldn't because the sample is occluded at the neighbor. We would\n\t// then have a Z denominator (with 1/Z weights) that is too large and we'll end up with darkening.\n\t//\n\t// Now at the end of the first spatial reuse pass, the center pixel ends up with a sample that may\n\t// or may not be occluded from the center's pixel point of view. 
We didn't include the visibility\n\t// in the target function when resampling the neighbors (only when counting the \"correct\" neighbors\n\t// but that's all) so we are not giving a 0 weight to occluded resampled neighbors --> it is possible\n\t// that we picked an occluded sample.\n\t//\n\t// In the second spatial reuse pass, we are now going to resample from our neighbors and get some\n\t// samples that were not generated with occlusion in mind (because the resampling target function of\n\t// the first spatial reuse doesn't include visibility). Yet, we are going to weight them with occlusion\n\t// in mind. This means that we are probably going to discard samples because of occlusion that could\n\t// have been generated because they are generated without occlusion test. We end up discarding too many\n\t// samples --> brightening bias.\n\t//\n\t// With the visibility reuse at the end of each spatial pass, we force samples at the end of each\n\t// spatial reuse to take visibility into account so that when we weight them with visibility testing,\n\t// everything goes well\n\t//\n\t// As an optimization, we also do this for the pairwise MIS because pairwise MIS evaluates the target function\n\t// of reservoirs at their own location. Doing the visibility reuse here ensures that a reservoir sample at its own location\n\t// includes visibility and so we do not need to recompute the target function of the neighbors in this case. 
We can just\n\t// reuse the target function stored in the reservoir\n\t//\n\t// We also give the user the choice to remove bias using this option or not as it introduces very little bias\n\t// in practice (but noticeable when switching back and forth between reference image/biased image)\n\t//\n\t// We only need this if we're going to temporally reuse (because then the output of the spatial reuse must be correct\n\t// for the temporal reuse pass) or if we have multiple spatial reuse passes and this is not the last spatial pass\n\tbool not_last_spatial_pass = render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes - 1 != render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_pass_index;\n\tif (render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass || not_last_spatial_pass)\n\t\tReSTIR_DI_visibility_test_kill_reservoir(render_data, spatial_reuse_output_reservoir, center_pixel_surface.shading_point, center_pixel_surface.primitive_index, random_number_generator);\n#endif\n\n\t// M-capping so that we don't have to M-cap when reading reservoirs on the next frame\n\tif (render_data.render_settings.restir_di_settings.m_cap > 0)\n\t\t// M-capping the temporal neighbor if an M-cap has been given\n\t\tspatial_reuse_output_reservoir.M = hippt::min(spatial_reuse_output_reservoir.M, render_data.render_settings.restir_di_settings.m_cap);\n\n\trender_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs[center_pixel_index] = spatial_reuse_output_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DI/TemporalReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_DI_TEMPORAL_REUSE_H\n#define DEVICE_RESTIR_DI_TEMPORAL_REUSE_H \n\n#include \"Device/includes/Dispatcher.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/TemporalMISWeight.h\"\n#include \"Device/includes/ReSTIR/TemporalNormalizationWeight.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/DI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsTemporal.h\"\n#include \"Device/includes/Sampling.h\"\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/HitInfo.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n *\n * [1] [Spatiotemporal reservoir resampling for real-time ray tracing with dynamic direct lighting] https://research.nvidia.com/labs/rtr/publication/bitterli2020spatiotemporal/\n * [2] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n * [3] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time - SIGGRAPH 2023 Presentation Video] https://dl.acm.org/doi/10.1145/3587423.3595511#sec-supp\n * [4] [NVIDIA RTX DI SDK - Github] https://github.com/NVIDIAGameWorks/RTXDI\n * [5] [Generalized Resampled Importance Sampling Foundations of ReSTIR] https://research.nvidia.com/publication/2022-07_generalized-resampled-importance-sampling-foundations-restir\n * [6] [Uniform disk sampling] https://rh8liuqy.github.io/Uniform_Disk.html\n * [7] [Reddit Post for the Jacobian Term needed] https://www.reddit.com/r/GraphicsProgramming/comments/1eo5hqr/restir_di_light_sample_pdf_confusion/\n * [8] [Rearchitecting Spatiotemporal 
Resampling for Production] https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production\n * [9] [Adventures in Hybrid Rendering] https://diharaw.github.io/post/adventures_in_hybrid_rendering/\n * [10] [NVIDIA ReBLUR - Fast Denoising with Self Stabilizing Recurrent Blurs] https://developer.nvidia.com/gtc/2020/video/s22699-vid\n */\n\n// By convention, the temporal neighbor is the first one to be resampled in for loops \n// (for looping over the neighbors when resampling / computing MIS weights)\n// So instead of hardcoding 0 everywhere in the code, we just basically give it a name\n// with a #define\n#define TEMPORAL_NEIGHBOR_ID 0\n// Same when resampling the initial candidates\n#define INITIAL_CANDIDATES_ID 1\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_DI_TemporalReuse(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_DI_TemporalReuse(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n\tconst uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n\tconst uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\tif (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tuint32_t center_pixel_index = (x + y * render_data.render_settings.render_resolution.x);\n\n\tif (!render_data.aux_buffers.pixel_active[center_pixel_index] || render_data.g_buffer.first_hit_prim_index[center_pixel_index] == -1)\n\t\t// Pixel inactive because of adaptive sampling, returning\n\t\t// Or also we don't have a primary hit\n\t\treturn;\n\n\t// Initializing the random generator\n\tunsigned int seed;\n\tif (render_data.render_settings.freeze_random)\n\t\tseed = wang_hash(center_pixel_index + 1);\n\telse\n\t\tseed = wang_hash((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\tXorshift32Generator 
random_number_generator(seed);\n\n\tif (render_data.render_settings.restir_di_settings.common_temporal_pass.temporal_buffer_clear_requested)\n\t\t// We requested a temporal buffer clear for ReSTIR DI\n\t\trender_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs[center_pixel_index] = ReSTIRDIReservoir();\n\n\t// Surface data of the center pixel\n\tReSTIRSurface center_pixel_surface = get_pixel_surface(render_data, center_pixel_index, random_number_generator);\n\n\tint temporal_neighbor_pixel_index = find_temporal_neighbor_index<false>(render_data,\n\t\trender_data.g_buffer.primary_hit_position[center_pixel_index], center_pixel_surface.shading_normal, center_pixel_index, random_number_generator).x;\n\tif (temporal_neighbor_pixel_index == -1 || render_data.render_settings.freeze_random)\n\t{\n\t\t// Temporal occlusion / disocclusion, temporal neighbor is invalid,\n\t\t// we're only going to resample the initial candidates so let's set that as\n\t\t// the output right away\n\t\t//\n\t\t// We're also 'disabling' temporal accumulation if the random is frozen otherwise\n\t\t// very strong correlations will creep up, corrupt the render and potentially invalidate\n\t\t// performance measurements (which we're probably trying to measure since we froze the random)\n\t\t\n\t\t// The output of this temporal pass is just the initial candidates reservoir\n\t\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[center_pixel_index];\n\t\t\n\t\treturn;\n\t}\n\t\n\t\n\tReSTIRDIReservoir temporal_neighbor_reservoir = render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs[temporal_neighbor_pixel_index];\n\tif (temporal_neighbor_reservoir.M == 0)\n\t{\n\t\t// No temporal neighbor, the output of this temporal pass is just the initial candidates 
reservoir\n\t\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[center_pixel_index];\n\n\t\treturn;\n\t}\n\n\tReSTIRDIReservoir temporal_reuse_output_reservoir;\n\tReSTIRSurface temporal_neighbor_surface = get_pixel_surface(render_data, temporal_neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\tif (temporal_neighbor_surface.material.is_emissive())\n\t{\n\t\t// Can't resample the temporal neighbor if it's emissive so output the initial candidates right away\n\t\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[center_pixel_index];\n\n\t\treturn;\n\t}\n\n\tReSTIRTemporalResamplingMISWeight<ReSTIR_DI_BiasCorrectionWeights, /* IsReSTIR GI */ false> mis_weight_function;\n\n\n\t// Only used with MIS-like weight\n\t// \n\t// Will keep the index of the neighbor that has been selected by resampling. 
\n\t// Either 0 or 1 for the temporal resampling pass\n\tint selected_neighbor = 0;\n\n\t// /* ------------------------------- */\n\t// Resampling the temporal neighbor\n\t// /* ------------------------------- */\n\n\tReSTIRDIReservoir initial_candidates_reservoir = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs[center_pixel_index];\n\tif (temporal_neighbor_reservoir.M > 0)\n\t{\n\t\tfloat target_function_at_center = 0.0f;\n\t\tif (temporal_neighbor_reservoir.UCW > 0.0f)\n\t\t\t// Only resampling if the temporal neighbor isn't empty\n\t\t\t//\n\t\t\t// If the temporal neighbor's reservoir is empty, then we do not get\n\t\t\t// inside that if() and the target function stays at 0.0f which eliminates\n\t\t\t// most of the computations afterwards\n\t\t\t//\n\t\t\t// Matching the visibility used here with the bias correction mode for ease \n\t\t\t// of use (and because manually handling the visibility in the target \n\t\t\t// function of the temporal reuse is tricky for the user to use in \n\t\t\t// combination with other parameters and on top of that, it makes little \n\t\t\t// technical sense since our temporal neighbor is supposed to be unoccluded \n\t\t\t// (unless geometry moves around in the scene but that's another problem)\n\t\t\ttarget_function_at_center = ReSTIR_DI_evaluate_target_function<ReSTIR_DI_BiasCorrectionUseVisibility>(render_data, temporal_neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\t\tfloat 
temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, temporal_neighbor_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, \n\n\t\t\ttemporal_neighbor_reservoir.sample,\n\t\t\tinitial_candidates_reservoir.M, \n\n\t\t\ttemporal_neighbor_surface, center_pixel_surface, \n\t\t\ttemporal_neighbor_reservoir.M, TEMPORAL_NEIGHBOR_ID, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\t\n\t\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\t\tcenter_pixel_surface, temporal_neighbor_surface, \n\t\t\ttarget_function_at_center, TEMPORAL_NEIGHBOR_ID, \n\t\t\trandom_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\n\t\t\ttarget_function_at_center, TEMPORAL_NEIGHBOR_ID, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t// Combining as in Alg. 
6 of the paper\n\t\tfloat jacobian_determinant = 1.0f;\n\t\tif (temporal_reuse_output_reservoir.combine_with(temporal_neighbor_reservoir, temporal_neighbor_resampling_mis_weight, target_function_at_center, jacobian_determinant, random_number_generator))\n\t\t{\n\t\t\t// Only used with MIS-like weight\n\t\t\tselected_neighbor = TEMPORAL_NEIGHBOR_ID;\n\n\t\t\t// Using ReSTIR_DI_BiasCorrectionUseVisibility here because that's what we use in the resampling target function\n#if ReSTIR_DI_BiasCorrectionUseVisibility == KERNEL_OPTION_FALSE\n\t\t\t// We cannot be certain that the visibility of the temporal neighbor\n\t\t\t// chosen is exactly the same so we're clearing the unoccluded flag\n\t\t\ttemporal_reuse_output_reservoir.sample.flags &= ~ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#else\n\t\t\t// However, if we're using the visibility in the target function, then\n\t\t\t// the temporal neighbor could never have been selected unless it is\n\t\t\t// unoccluded so we can add the flag\n\t\t\ttemporal_reuse_output_reservoir.sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#endif\n\t\t}\n\t\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\t}\n\n\t// /* ------------------------------- */\n\t// Resampling the initial candidates\n\t// /* ------------------------------- */\n\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(initial_candidates_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(initial_candidates_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, initial_candidates_reservoir);\n#elif ReSTIR_DI_BiasCorrectionWeights == 
RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\tinitial_candidates_reservoir.sample,\n\t\tinitial_candidates_reservoir.M, \n\n\t\ttemporal_neighbor_surface, center_pixel_surface, \n\t\ttemporal_neighbor_reservoir.M, INITIAL_CANDIDATES_ID, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\n\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir, \n\t\tcenter_pixel_surface, temporal_neighbor_surface, \n\t\t/* unused */ 0.0f, INITIAL_CANDIDATES_ID, \n\t\trandom_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO ||ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\n\t\t/* unused */ 0.0f, INITIAL_CANDIDATES_ID, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\tif (temporal_reuse_output_reservoir.combine_with(initial_candidates_reservoir, initial_candidates_mis_weight, initial_candidates_reservoir.sample.target_function, /* jacobian is 1 when reusing at the exact same spot */ 1.0f, random_number_generator))\n\t{\n\t\t// Only used with MIS-like weight\n\t\tselected_neighbor = INITIAL_CANDIDATES_ID;\n\n\t\t// Using ReSTIR_DI_BiasCorrectionUseVisibility here because that's what we use in the resampling target function\n#if ReSTIR_DI_BiasCorrectionUseVisibility == KERNEL_OPTION_FALSE\n\t\t// We resampled the center pixel so we can copy the unoccluded 
flag\n\t\ttemporal_reuse_output_reservoir.sample.flags |= initial_candidates_reservoir.sample.flags & ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#else\n\t\t// However, if we're using the visibility in the target function, then\n\t\t// we are sure that the sample is now unoccluded\n\t\ttemporal_reuse_output_reservoir.sample.flags |= ReSTIRDISampleFlags::RESTIR_DI_FLAGS_UNOCCLUDED;\n#endif\n\t}\n\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\n\tfloat normalization_numerator = 1.0f;\n\tfloat normalization_denominator = 1.0f;\n\n\tReSTIRTemporalNormalizationWeight<ReSTIR_DI_BiasCorrectionWeights, /* Is ReSTIR GI */ false> normalization_function;\n#if ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n\tnormalization_function.get_normalization(temporal_reuse_output_reservoir.weight_sum,\n\t\tinitial_candidates_reservoir.M, temporal_neighbor_reservoir.M, normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\tnormalization_function.get_normalization(render_data, \n\t\ttemporal_reuse_output_reservoir.sample, temporal_reuse_output_reservoir.weight_sum,\n\t\tinitial_candidates_reservoir.M, temporal_neighbor_reservoir.M, center_pixel_surface, temporal_neighbor_surface, \n\t\tnormalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n\tnormalization_function.get_normalization(render_data, \n\t\ttemporal_reuse_output_reservoir.sample, temporal_reuse_output_reservoir.weight_sum,\n\t\tinitial_candidates_reservoir.M, temporal_neighbor_reservoir.M, center_pixel_surface, temporal_neighbor_surface,\n\t\tselected_neighbor, normalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n\tnormalization_function.get_normalization(normalization_numerator, 
normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_DI_BiasCorrectionWeights == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\ttemporal_reuse_output_reservoir.end_with_normalization(normalization_numerator, normalization_denominator);\n\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\n\t// M-capping so that we don't have to M-cap when reading reservoirs on the next frame\n\tif (render_data.render_settings.restir_di_settings.m_cap > 0)\n\t\t// M-capping the temporal neighbor if an M-cap has been given\n\t\ttemporal_reuse_output_reservoir.M = hippt::min(temporal_reuse_output_reservoir.M, render_data.render_settings.restir_di_settings.m_cap);\n\n\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs[center_pixel_index] = temporal_reuse_output_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/DirectionalReuseCompute.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RESTIR_DIRECTIONAL_REUSE_COMPUTE_H\n#define KERNELS_RESTIR_DIRECTIONAL_REUSE_COMPUTE_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/ReSTIR/NeighborSimilarity.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#define NB_RADIUS 32\n#if ComputingSpatialDirectionalReuseForReSTIRGI == KERNEL_OPTION_TRUE\n#define NB_SAMPLES_PER_RADIUS_INTERNAL ReSTIR_GI_SpatialDirectionalReuseBitCount // CHANGE THIS ONE\n#else\n#define NB_SAMPLES_PER_RADIUS_INTERNAL ReSTIR_DI_SpatialDirectionalReuseBitCount // CHANGE THIS ONE\n#endif\n\n#define NB_SAMPLES_PER_RADIUS (NB_SAMPLES_PER_RADIUS_INTERNAL > 64 ? 64 : NB_SAMPLES_PER_RADIUS_INTERNAL) // Max to 64 for unsigned long long int\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_Directional_Reuse_Compute(HIPRTRenderData render_data,\n    unsigned int* __restrict__ out_directional_reuse_masks_buffer_u,\n    unsigned long long int* __restrict__ out_directional_reuse_masks_buffer_ull,\n    unsigned char* __restrict__ out_adaptive_radius_buffer)\n#else\ntemplate <bool IsReSTIRGI>\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_Directional_Reuse_Compute(HIPRTRenderData render_data, int x, int y,\n    unsigned int* __restrict__ out_directional_reuse_masks_buffer_u,\n    unsigned long long int* __restrict__ out_directional_reuse_masks_buffer_ull,\n    unsigned char* __restrict__ out_adaptive_radius_buffer)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t 
center_pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (!render_data.aux_buffers.pixel_active[center_pixel_index])\n        // Pixel isn't active because of adaptive sampling or render resolution scaling\n        return;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(center_pixel_index + 1);\n    else\n        seed = wang_hash((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    // Clearing previous data\n #if NB_SAMPLES_PER_RADIUS > 32\n     out_directional_reuse_masks_buffer_ull[center_pixel_index] = 0;\n #else\n     out_directional_reuse_masks_buffer_u[center_pixel_index] = 0;\n #endif\n    out_adaptive_radius_buffer[center_pixel_index] = 0;\n\n\n\n\n\n#ifdef __KERNELCC__\n    // If on the GPU, using the 'ComputingSpatialDirectionalReuseForReSTIRGI' macro\n    // (that is passed to the compiler in the ReSTIRDI/GI RenderPass.cpp)\n    //\n    // To get the settings\n    ReSTIRCommonSpatialPassSettings spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<ComputingSpatialDirectionalReuseForReSTIRGI>(render_data);\n#else\n    // On the CPU, it is the template argument that dictates whether this is for ReSTIR DI or GI\n    ReSTIRCommonSpatialPassSettings spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n#endif\n\n    float3 center_shading_point = render_data.g_buffer.primary_hit_position[center_pixel_index];\n#ifdef __KERNELCC__\n    float3 center_normal = ReSTIRSettingsHelper::get_restir_neighbor_similarity_settings<ComputingSpatialDirectionalReuseForReSTIRGI>(render_data).reject_using_geometric_normals ? 
render_data.g_buffer.geometric_normals[center_pixel_index].unpack() : render_data.g_buffer.shading_normals[center_pixel_index].unpack();\n#else\n    float3 center_normal = ReSTIRSettingsHelper::get_restir_neighbor_similarity_settings<IsReSTIRGI>(render_data).reject_using_geometric_normals ? render_data.g_buffer.geometric_normals[center_pixel_index].unpack() : render_data.g_buffer.shading_normals[center_pixel_index].unpack();\n#endif\n\n    float best_area = 0.0f;\n    int best_radius_index = 0;\n    // Each long long int in there contains, in each bit, whether or not the direction for that radius is reusable or not\n    unsigned long long int valid_samples_per_radius[NB_RADIUS] = { 0 };\n    for (int radius_index = 0; radius_index < NB_RADIUS; radius_index++)\n    {\n        float current_radius = spatial_pass_settings.minimum_per_pixel_reuse_radius + (radius_index / (float)NB_RADIUS) * (spatial_pass_settings.reuse_radius - spatial_pass_settings.minimum_per_pixel_reuse_radius);\n        float current_radius_circle_area = M_PI * current_radius * current_radius;\n\n        // Now sampling a bunch of neighbors *on* that radius, exactly at that radius distance from the center (i.e. 
*not* within the disk of that radius)\n        float area_at_current_radius = 0.0f;\n        for (int sample_index = 0; sample_index < NB_SAMPLES_PER_RADIUS; sample_index++)\n        {\n            if (radius_index > 0)\n                if (!(valid_samples_per_radius[radius_index - 1] & (1ull << sample_index)))\n                    // If this direction wasn't accepted at the previous radius\n                    continue;\n\n            float theta = sample_index / (float)NB_SAMPLES_PER_RADIUS * M_TWO_PI;\n            float x_circle = current_radius * cosf(theta);\n            float y_circle = current_radius * sinf(theta);\n\n            int2 neighbor_offset_in_disk = make_int2(static_cast<int>(roundf(x_circle)), static_cast<int>(roundf(y_circle)));\n            int2 neighbor_pixel_coords = make_int2(x, y) + neighbor_offset_in_disk;\n            if (neighbor_pixel_coords.x < 0 || neighbor_pixel_coords.x >= render_data.render_settings.render_resolution.x ||\n                neighbor_pixel_coords.y < 0 || neighbor_pixel_coords.y >= render_data.render_settings.render_resolution.y)\n                // Rejecting the sample if it's outside of the viewport\n                continue;\n\n            int neighbor_index = neighbor_pixel_coords.x + neighbor_pixel_coords.y * render_data.render_settings.render_resolution.x;\n\n#ifdef __KERNELCC__\n            // If on the GPU, using the 'ComputingSpatialDirectionalReuseForReSTIRGI' macro\n            // (that is passed to the compiler in the ReSTIRDI/GI RenderPass.cpp)\n            //\n            // To determine whether this is for ReSTIR DI or GI\n            if (!check_neighbor_similarity_heuristics<ComputingSpatialDirectionalReuseForReSTIRGI>(render_data, neighbor_index, center_pixel_index, center_shading_point, center_normal))\n                continue;\n#else\n            // On the CPU, it is the template argument that dictates whether this is for ReSTIR DI or GI\n            if 
(!check_neighbor_similarity_heuristics<IsReSTIRGI>(render_data, neighbor_index, center_pixel_index, center_shading_point, center_normal))\n                continue;\n#endif\n\n            valid_samples_per_radius[radius_index] |= (1ull << sample_index);\n            area_at_current_radius += current_radius_circle_area * (1.0f / NB_SAMPLES_PER_RADIUS);\n        }\n\n        if (best_area < area_at_current_radius)\n        {\n            best_area = area_at_current_radius;\n            best_radius_index = radius_index;\n        }\n    }\n\n    // Computing the actual radius from the best radius index\n    float best_radius = spatial_pass_settings.minimum_per_pixel_reuse_radius + (best_radius_index / (float)NB_RADIUS) * (spatial_pass_settings.reuse_radius - spatial_pass_settings.minimum_per_pixel_reuse_radius);\n    if (best_area == 0.0f)\n        best_radius = 0.0f;\n\n    out_adaptive_radius_buffer[center_pixel_index] = (unsigned char)best_radius;\n#if NB_SAMPLES_PER_RADIUS > 32\n    out_directional_reuse_masks_buffer_ull[center_pixel_index] = valid_samples_per_radius[best_radius_index];\n#else\n    // Extracting the low 32 bits\n    out_directional_reuse_masks_buffer_u[center_pixel_index] = (unsigned int)(valid_samples_per_radius[best_radius_index] & 0x00000000FFFFFFFF);\n#endif\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/GI/InitialCandidates.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RESTIR_GI_INITIAL_CANDIDATES_H\n#define KERNELS_RESTIR_GI_INITIAL_CANDIDATES_H\n\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/LightSampling/Lights.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/GI/InitialCandidatesUtils.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_GI_InitialCandidates(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_GI_InitialCandidates(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (!render_data.aux_buffers.pixel_active[pixel_index])\n        // Pixel isn't active because of adaptive sampling or render resolution scaling\n        return;\n\n    if (render_data.render_settings.do_render_low_resolution())\n        // Reducing the number of bounces to 3 if rendering at low resolution\n        // for better interactivity\n        render_data.render_settings.nb_bounces = hippt::min(3, render_data.render_settings.nb_bounces);\n\n\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = 
wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    // Initializing the closest hit info the information from the camera ray pass\n    HitInfo closest_hit_info;\n    closest_hit_info.inter_point = render_data.g_buffer.primary_hit_position[pixel_index];\n    closest_hit_info.geometric_normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n    closest_hit_info.shading_normal = render_data.g_buffer.shading_normals[pixel_index].unpack();\n    closest_hit_info.primitive_index = render_data.g_buffer.first_hit_prim_index[pixel_index];\n\n    // Initializing the ray with the information from the camera ray pass\n    hiprtRay ray;\n    ray.direction = -render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n\n    RayPayload ray_payload;\n    ray_payload.next_ray_state = RayState::BOUNCE;\n    ray_payload.material = render_data.g_buffer.materials[pixel_index].unpack();\n\n    // Because this is the camera hit (and assuming the camera isn't inside volumes for now),\n    // the ray volume state after the camera hit is just an empty interior stack but with\n    // the material index that we hit pushed onto the stack. That's it. 
Because it is that\n    // simple, we don't have the ray volume state in the GBuffer but rather we can\n    // reconstruct the ray volume state on the fly\n    ray_payload.volume_state.reconstruct_first_hit(\n        ray_payload.material,\n        render_data.buffers.material_indices,\n        closest_hit_info.primitive_index,\n        random_number_generator);\n\n    bool intersection_found = closest_hit_info.primitive_index != -1;\n\n    ReSTIRSurface initial_surface;\n    initial_surface.geometric_normal = closest_hit_info.geometric_normal;\n    initial_surface.shading_normal = closest_hit_info.shading_normal;\n    initial_surface.primitive_index = closest_hit_info.primitive_index;\n    initial_surface.material = ray_payload.material;\n    initial_surface.ray_volume_state = ray_payload.volume_state;\n    initial_surface.shading_point = closest_hit_info.inter_point;\n    initial_surface.view_direction = -ray.direction;\n\n    float bsdf_sample_pdf = 0.0f;\n    ReSTIRGISample restir_gi_initial_sample;\n\n    ColorRGB32F incoming_radiance_to_visible_point;\n    ColorRGB32F incoming_radiance_to_sample_point;\n    ColorRGB32F throughput_to_visible_point = ColorRGB32F(1.0f);\n\n    // + 1 to nb_bounces here because we want \"0\" bounces to still act as one\n    // hit and to return some color\n    for (int& bounce = ray_payload.bounce; bounce < render_data.render_settings.nb_bounces + 1; bounce++)\n    {\n        if (ray_payload.next_ray_state != RayState::MISSED)\n        {\n            if (bounce > 0)\n            {\n                if (bounce == 1)\n                    // This is going to be tracing the ray from the visible point to the sample:\n                    // we're saving the random seed used during the BVH traversal to be able to reproduce\n                    // alpha tests\n                    restir_gi_initial_sample.visible_to_sample_point_alpha_test_random_seed = random_number_generator.m_state.seed;\n\n                intersection_found = 
path_tracing_find_indirect_bounce_intersection(render_data, ray, ray_payload, closest_hit_info, random_number_generator);\n            }\n\n            if (intersection_found)\n            {\n                if (bounce == 0)\n                    store_denoiser_AOVs(render_data, pixel_index, closest_hit_info.shading_normal, ray_payload.material.base_color);\n                else if (bounce > 0)\n                {\n                    bool ReGIR_primary_hit = render_data.render_settings.regir_settings.compute_is_primary_hit(ray_payload);\n\n                    // Storing data for ReGIR representative points\n                    ReGIR_update_representative_data(render_data, closest_hit_info.inter_point, closest_hit_info.geometric_normal, render_data.current_camera, closest_hit_info.primitive_index, ReGIR_primary_hit, ray_payload.material);\n                }\n\n                if (bounce == 1)\n                {\n                    restir_gi_initial_sample.sample_point_geometric_normal.pack(closest_hit_info.geometric_normal);\n                    restir_gi_initial_sample.sample_point = closest_hit_info.inter_point;\n                    restir_gi_initial_sample.sample_point_primitive_index = closest_hit_info.primitive_index;\n                    restir_gi_initial_sample.sample_point_rough_enough = MaterialUtils::can_do_light_sampling(ray_payload.material, render_data.render_settings.restir_gi_settings.neighbor_sample_point_roughness_threshold);\n                }\n\n                if (bounce > 0)\n                {\n                    // Estimating with a throughput of 1.0f here because we're going to apply the throughput ourselves\n                    ColorRGB32F direct_lighting_estimation = estimate_direct_lighting(render_data, ray_payload, ColorRGB32F(1.0f), closest_hit_info, -ray.direction, x, y, random_number_generator);\n                    // Updating the cumulated outgoing radiance of our path to the visible point\n                    
incoming_radiance_to_visible_point += clamp_direct_lighting_estimation(direct_lighting_estimation * throughput_to_visible_point, render_data.render_settings.indirect_contribution_clamp, bounce);\n                }\n\n                float bsdf_pdf;\n                BSDFIncidentLightInfo incident_light_info;\n                bool valid_indirect_bounce = restir_gi_compute_next_indirect_bounce(render_data, ray_payload, throughput_to_visible_point, closest_hit_info, -ray.direction, ray, random_number_generator, &incident_light_info, &bsdf_pdf);\n                if (!valid_indirect_bounce)\n                    // Bad BSDF sample (under the surface), killed by russian roulette, ...\n                    break;\n\n                if (bounce == 0)\n                {\n                    restir_gi_initial_sample.incident_light_info_at_visible_point = incident_light_info;\n                    bsdf_sample_pdf = bsdf_pdf;\n                }\n            }\n            else\n            {\n                if (bounce == 1)\n                {\n                    // For envmap path, the direction is stored in the hit point\n                    restir_gi_initial_sample.sample_point = ray.direction;\n                    // -1 for the primitive index indicates that this is an envmap sample\n                    restir_gi_initial_sample.sample_point_primitive_index = -1;\n                }\n\n                incoming_radiance_to_visible_point += path_tracing_miss_gather_envmap(render_data, throughput_to_visible_point, ray.direction, ray_payload.bounce, pixel_index);\n\n                ray_payload.next_ray_state = RayState::MISSED;\n            }\n        }\n        else if (ray_payload.next_ray_state == RayState::MISSED)\n            break;\n    }\n\n    // Checking for NaNs / negative value samples. 
Output \n    if (!sanity_check(render_data, ray_payload.ray_color, x, y))\n        return;\n\n    // If we got here, this means that we still have at least one ray active\n    // This is a concurrent write by the way but we don't really care, everyone is writing\n    // the same value\n    render_data.aux_buffers.still_one_ray_active[0] = 1;\n\n    restir_gi_initial_sample.incoming_radiance_to_visible_point.pack(incoming_radiance_to_visible_point);\n    restir_gi_initial_sample.target_function = ReSTIR_GI_evaluate_target_function<true, false>(render_data, restir_gi_initial_sample, initial_surface, random_number_generator);\n\n    float resampling_weight = 0.0f;\n    float mis_weight = 1.0f;\n    float target_function = restir_gi_initial_sample.target_function;\n    float source_pdf = bsdf_sample_pdf;\n    if (source_pdf > 0.0f)\n        resampling_weight = mis_weight * restir_gi_initial_sample.target_function / source_pdf;\n\n    ReSTIRGIReservoir restir_gi_initial_reservoir;\n    restir_gi_initial_reservoir.add_one_candidate(restir_gi_initial_sample, resampling_weight, random_number_generator);\n    restir_gi_initial_reservoir.end();\n    restir_gi_initial_reservoir.sanity_check(make_int2(x, y));\n\n    render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer[pixel_index] = restir_gi_initial_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/GI/Shading.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RESTIR_GI_SHADING_H\n#define KERNELS_RESTIR_GI_SHADING_H\n\n#include \"Device/includes/LightSampling/Envmap.h\"\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/LightSampling/Lights.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\n // ReSTIR GI shading/resampling is still a bit broken, there's still some brightening bias coming from\n // I don't know where, supposedly when the BRDF starts to include smooth/glossy BRDFs\n // \n // This manifests the most on specular 1 + roughness 0 everywhere in the scene\n // Maybe the bias is also there with a Lambertian BRDF but I could never see it. Maybe it's there but it's just so subtle that it's invisible\n // \n // -------------------------- WHAT WE KNOW --------------------------\n // - Still biased with no alpha tests\n // - Do we absolutely have correct convergence on Lambertian & Oren Nayar? --> hard to verify, looks like it?\n // - Is it the glass that is biased? -------> No\n // - 1/Z is also biased, even without the jacobian rejection heuristic\n // - It's not the adaptive sampling that is messed up\n // - Definitely has some bias (very little but there) with everything using a metallic BRDF, roughness 0.1, 50 bounces. 
Contemporary bedroom\n // - There is some bias in the contemporary bedroom at 1 bounce, everything specular, 0 roughness, with RIS light sampling + envmap sampling\n // - Can't see any bias with lambertian/oren nayar contemporary bedroom + NEE + Envmap\n // - There is still some bias with a roughness 1.0f metallic\n // - Not a normal mapping issue?\n // - With everything specular at IOR 1.0f but roughness 1.0f, there's basically no bias. Even though the specular layer has no effect because of IOR 1.0f. So if the roughness of an inexistant layer changes the bias, it can only be a PDF issue?\n // - Because there is no bias on full Lambertian, this isn't a jacobian issue?\n // \n // With a specular IOR 1 + diffuse lobe setup\n //     - increasing the roughness of the specular (still at IOR 1) reduces the bias\n //     - artificially fixing the proba of sampling the specular to 90 % and diffuse 10 % increases the bias quiiite a lot(but still converges correctly without ReSTIR and when reusing 0 neighbor).Even more so when approaching 100 % specular(but not quite 100 %)\n //     - sampling the specular & diffuse lobe based on the fresnel reflectance yields a different bias vs.sampling 50 / 50 (or 90 / 10).\n //     - resampling more spatial neighbors makes things a bit worse.But only up to a certain point.For example, 6 spatial reuse passes with 16 candidates each(which is huge) is barely worse than 1 spatial pass @ 16 candidates\n //     - with 0 spatial neighbor reuse, it converges correctly, no matter the BSDF / sampling probas / ...\n //     - there seems to be no bias with only 1 bounce(i.e.with paths, being at most : \"camera -> first hit -> second hit\").Bias only comes in with # of bounce >= 2\n // -------------------------- WHAT WE KNOW --------------------------\n // \n // -------------------------- DIRTY FIX RIGHT NOW --------------------------\n // - No double BSDF shading\n // - No double BSDF in target function\n // - Reuse on specular is ok\n // - Using 
rejection heuristics is better\n // -------------------------- DIRTY FIX RIGHT NOW --------------------------\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_GI_Shading(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_GI_Shading(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    if (!render_data.aux_buffers.pixel_active[pixel_index])\n        return;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    hiprtRay ray;\n    ray.direction = -render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n\n    HitInfo closest_hit_info;\n    closest_hit_info.primitive_index = render_data.g_buffer.first_hit_prim_index[pixel_index];\n    if (closest_hit_info.primitive_index == -1)\n    {\n        // Geometry miss, directly into the envmap\n        ColorRGB32F envmap_radiance = path_tracing_miss_gather_envmap(render_data, ColorRGB32F(1.0f), ray.direction, 0, pixel_index);\n        path_tracing_accumulate_color(render_data, envmap_radiance, pixel_index);\n\n        return;\n    }\n\n    closest_hit_info.inter_point = render_data.g_buffer.primary_hit_position[pixel_index];\n    closest_hit_info.shading_normal = render_data.g_buffer.shading_normals[pixel_index].unpack();\n\n    // Initializing the ray with the information from the camera ray 
pass\n    RayPayload ray_payload;\n    ray_payload.next_ray_state = RayState::BOUNCE;\n    // Loading the first hit in the ray payload\n    ray_payload.material = render_data.g_buffer.materials[pixel_index].unpack();\n    ray_payload.volume_state.reconstruct_first_hit(\n        ray_payload.material,\n        render_data.buffers.material_indices,\n        closest_hit_info.primitive_index,\n        random_number_generator);\n\n    float3 view_direction = render_data.g_buffer.get_view_direction(render_data.current_camera.position, pixel_index);\n\n    ColorRGB32F camera_outgoing_radiance;\n    if (render_data.render_settings.enable_direct)\n        // Adding the direct lighting contribution at the first hit in the direction of the camera\n        camera_outgoing_radiance += estimate_direct_lighting(render_data, ray_payload, closest_hit_info, view_direction, x, y, random_number_generator);\n\n    ReSTIRGIReservoir resampling_reservoir = render_data.render_settings.restir_gi_settings.restir_output_reservoirs[pixel_index];\n    if (render_data.render_settings.nb_bounces > 0)\n    {\n        // Only doing the ReSTIR GI stuff if we have more than 1 bounce\n\n        if (resampling_reservoir.UCW > 0.0f)\n        {\n            // Only doing the shading if we do actually have a sample\n\n            float3 geometric_normal = render_data.g_buffer.geometric_normals[pixel_index].unpack();\n\n            float3 restir_resampled_indirect_direction;\n            if (resampling_reservoir.sample.is_envmap_path())\n                restir_resampled_indirect_direction = resampling_reservoir.sample.sample_point;\n            else\n                restir_resampled_indirect_direction = hippt::normalize(resampling_reservoir.sample.sample_point - closest_hit_info.inter_point);\n\n            // Computing the BSDF throughput at the first hit\n            //  - view direction: towards the camera\n            //  - incident light direction: towards the sample point\n            float 
bsdf_pdf_first_hit;\n            BSDFContext bsdf_first_hit_context(view_direction, closest_hit_info.shading_normal, geometric_normal, restir_resampled_indirect_direction, resampling_reservoir.sample.incident_light_info_at_visible_point, ray_payload.volume_state, false, ray_payload.material, 0, 0.0f);\n            ColorRGB32F bsdf_color_first_hit = bsdf_dispatcher_eval(render_data, bsdf_first_hit_context, bsdf_pdf_first_hit, random_number_generator);\n\n            ColorRGB32F first_hit_throughput;\n            if (bsdf_pdf_first_hit > 0.0f)\n                first_hit_throughput = bsdf_color_first_hit * hippt::abs(hippt::dot(restir_resampled_indirect_direction, closest_hit_info.shading_normal)) * resampling_reservoir.UCW;\n\n            if (resampling_reservoir.sample.is_envmap_path())\n                camera_outgoing_radiance += path_tracing_miss_gather_envmap(render_data, first_hit_throughput, restir_resampled_indirect_direction, 1, pixel_index);\n            else\n                camera_outgoing_radiance += first_hit_throughput * resampling_reservoir.sample.incoming_radiance_to_visible_point.unpack();\n        }\n    }\n\n        // Setting the 'camera_outgoing_radiance' into the ray color just for the call to 'sanity_check'\n    ray_payload.ray_color = camera_outgoing_radiance;\n    if (!sanity_check(render_data, ray_payload.ray_color, x, y))\n        return;\n\n    if (render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::FINAL_RESERVOIR_UCW)\n        path_tracing_accumulate_color(render_data, ColorRGB32F(resampling_reservoir.UCW) * render_data.render_settings.restir_gi_settings.debug_view_scale_factor, pixel_index);\n    else if (render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::TARGET_FUNCTION)\n        path_tracing_accumulate_color(render_data, ColorRGB32F(resampling_reservoir.sample.target_function) * render_data.render_settings.restir_gi_settings.debug_view_scale_factor, pixel_index);\n    else if 
(render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::WEIGHT_SUM)\n        path_tracing_accumulate_color(render_data, ColorRGB32F(resampling_reservoir.weight_sum) * render_data.render_settings.restir_gi_settings.debug_view_scale_factor, pixel_index);\n    else if (render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::M_COUNT)\n        path_tracing_accumulate_color(render_data, ColorRGB32F(resampling_reservoir.M) * render_data.render_settings.restir_gi_settings.debug_view_scale_factor, pixel_index);\n    else if (render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::PER_PIXEL_REUSE_RADIUS && render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius != nullptr)\n    {\n        float radius_percentage = (render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius[pixel_index] / (float)render_data.render_settings.restir_gi_settings.common_spatial_pass.reuse_radius);\n        ColorRGB32F debug_color = hippt::lerp(ColorRGB32F(2.0f, 0.0f, 0.0f), ColorRGB32F(0.0f, 2.0f, 0.0f), radius_percentage);\n\n        debug_set_final_color(render_data, x, y, debug_color);\n    }\n    else if (render_data.render_settings.restir_gi_settings.debug_view == ReSTIRGIDebugView::PER_PIXEL_VALID_DIRECTIONS_PERCENTAGE && render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius != nullptr)\n    {\n        unsigned char accepted_directions = hippt::popc(ReSTIRSettingsHelper::get_spatial_reuse_direction_mask_ull<true>(render_data, pixel_index));\n        float accepted_percentage = accepted_directions / 32.0f;\n        ColorRGB32F debug_color = hippt::lerp(ColorRGB32F(2.0f, 0.0f, 0.0f), ColorRGB32F(0.0f, 2.0f, 0.0f), accepted_percentage);\n\n        debug_set_final_color(render_data, x, y, debug_color);\n    }\n    else\n        // Regular output\n        path_tracing_accumulate_color(render_data, 
camera_outgoing_radiance, pixel_index);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/GI/SpatialReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_ReSTIR_GI_SPATIAL_REUSE_H\n#define DEVICE_ReSTIR_GI_SPATIAL_REUSE_H \n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/ReSTIR/Jacobian.h\"\n#include \"Device/includes/ReSTIR/NeighborSimilarity.h\"\n#include \"Device/includes/ReSTIR/OptimalVisibilitySampling.h\"\n#include \"Device/includes/ReSTIR/SpatialMISWeight.h\"\n#include \"Device/includes/ReSTIR/SpatialNormalizationWeight.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsSpatial.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/TargetFunction.h\"\n\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n  *\n  * [1] [ReSTIR GI: Path Resampling for Real-Time Path Tracing] https://research.nvidia.com/publication/2021-06_restir-gi-path-resampling-real-time-path-tracing\n  * [2] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n  * [3] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time - SIGGRAPH 2023 Presentation Video] https://dl.acm.org/doi/10.1145/3587423.3595511#sec-supp\n  */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_GI_SpatialReuse(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_GI_SpatialReuse(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n\tconst uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n\tconst uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\tif (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tuint32_t center_pixel_index = (x + y * 
render_data.render_settings.render_resolution.x);\n\tint2 center_pixel_coords = make_int2(x, y);\n\n\tif (!render_data.aux_buffers.pixel_active[center_pixel_index] || render_data.g_buffer.first_hit_prim_index[center_pixel_index] == -1)\n\t{\n\t\t// Pixel inactive because of adaptive sampling, returning\n\t\t// Or also we don't have a primary hit\n\t\trender_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs[center_pixel_index] = ReSTIRGIReservoir();\n\n\t\treturn;\n\t}\n\n\t// Initializing the random generator\n\tunsigned int seed;\n\tif (render_data.render_settings.freeze_random)\n\t\tseed = wang_hash(center_pixel_index + 1);\n\telse if (render_data.render_settings.restir_gi_settings.common_spatial_pass.coalesced_spatial_reuse)\n\t\tseed = wang_hash((render_data.render_settings.sample_number + 1) * render_data.random_number);\n\telse \n\t\tseed = wang_hash(((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1)) * render_data.random_number);\n\tXorshift32Generator random_number_generator(seed);\n\n\tReSTIRGIReservoir* input_reservoir_buffer = render_data.render_settings.restir_gi_settings.spatial_pass.input_reservoirs;\n\tReSTIRGIReservoir center_pixel_reservoir = input_reservoir_buffer[center_pixel_index];\n\tif ((center_pixel_reservoir.M <= 1) && render_data.render_settings.restir_gi_settings.common_spatial_pass.do_disocclusion_reuse_boost)\n\t\t// Increasing the number of spatial samples for disocclusions\n\t\trender_data.render_settings.restir_gi_settings.common_spatial_pass.reuse_neighbor_count = render_data.render_settings.restir_gi_settings.common_spatial_pass.disocclusion_reuse_count;\n\n\t// Surface data of the center pixel\n\tReSTIRSurface center_pixel_surface = get_pixel_surface(render_data, center_pixel_index, random_number_generator);\n\n\tsetup_adaptive_directional_spatial_reuse<true>(render_data, center_pixel_index, random_number_generator);\n\n\t// Only used with MIS-like weight\n\tint selected_neighbor = 
0;\n\tint neighbor_heuristics_cache = 0;\n\tint valid_neighbors_count = 0;\n\tint valid_neighbors_M_sum = 0;\n\tcount_valid_spatial_neighbors<true>(render_data, center_pixel_surface, center_pixel_coords, valid_neighbors_count, valid_neighbors_M_sum, neighbor_heuristics_cache);\n\n\tint reused_neighbors_count = render_data.render_settings.restir_gi_settings.common_spatial_pass.reuse_neighbor_count;\n\tint start_index = 0;\n\tif (valid_neighbors_M_sum == 0)\n\t\t// No valid neighbor to resample from, skip to the initial candidate right away\n\t\tstart_index = reused_neighbors_count;\n\n\tReSTIRGIReservoir spatial_reuse_output_reservoir;\n\tReSTIRSpatialResamplingMISWeight<ReSTIR_GI_BiasCorrectionWeights, /* IsReSTIRGI */ true> mis_weight_function;\n\tXorshift32Generator spatial_neighbors_rng(render_data.render_settings.restir_gi_settings.common_spatial_pass.spatial_neighbors_rng_seed);\n\t// Resampling the neighbors. Using neighbors + 1 here so that\n\t// we can use the last iteration of the loop to resample ourselves (the center pixel)\n\t// \n\t// See the implementation of get_spatial_neighbor_pixel_index() in ReSTIR/UtilsSpatial.h\n\tfor (int neighbor_index = start_index; neighbor_index < reused_neighbors_count + 1; neighbor_index++)\n\t{\n\t\tconst bool is_center_pixel = neighbor_index == reused_neighbors_count;\n\n\t\t// We can already check whether or not this neighbor is going to be\n\t\t// accepted at all by checking the heuristic cache\n\t\tif (neighbor_index < reused_neighbors_count && reused_neighbors_count <= 32)\n\t\t{\n\t\t\t// If not the center pixel, we can check the heuristics, otherwise there's no need to,\n\t\t\t// we know that the center pixel will be accepted\n\t\t\t// \n\t\t\t// Our heuristics cache is a 32bit int so we can only cache 32 values are we're\n\t\t\t// going to have issues if we try to read more than that.\n\t\t\tif ((neighbor_heuristics_cache & (1 << neighbor_index)) == 0)\n\t\t\t{\n\t\t\t\t// Advancing the rng for generating the 
spatial neighbors since if we \"continue\" here, the spatial neighbors rng\n\t\t\t\t// isn't going to be advanced by the call to 'get_spatial_neighbor_pixel_index' below so we're doing it manually\n\t\t\t\tspatial_neighbor_advance_rng<true>(render_data, spatial_neighbors_rng);\n\n\t\t\t\t// Neighbor not passing the heuristics tests, skipping it right away\n\t\t\t\tcontinue;\n\t\t\t}\n\t\t}\n\n\t\tint neighbor_pixel_index = get_spatial_neighbor_pixel_index<true>(render_data, neighbor_index, center_pixel_coords, spatial_neighbors_rng);\n\t\tif (neighbor_pixel_index == -1)\n\t\t\t// Neighbor out of the viewport\n\t\t\tcontinue;\n\n\t\tif (!is_center_pixel && reused_neighbors_count > 32)\n\t\t\t// If not the center pixel, we can check the heuristics\n\t\t\t// \n\t\t\t// Only checking the heuristic if we have more than 32 neighbors (does not fit in the heuristic cache)\n\t\t\t// If we have less than 32 neighbors, we've already checked the cache at the beginning of this for loop\n\t\t\tif (!check_neighbor_similarity_heuristics<true>(render_data, neighbor_pixel_index, center_pixel_index, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<false>(render_data, center_pixel_surface)))\n\t\t\t\tcontinue;\n\n\t\tReSTIRGIReservoir neighbor_reservoir = input_reservoir_buffer[neighbor_pixel_index];\n\n\t\tfloat shift_mapping_jacobian = 1.0f;\n\t\tif (neighbor_reservoir.UCW > 0.0f && !is_center_pixel && !neighbor_reservoir.sample.is_envmap_path())\n\t\t{\n\t\t\t// Only attempting the shift if the neighbor reservoir is valid\n\t\t\t// \n\t\t\t// Also, if this is the last neighbor resample (meaning that it is the center pixel), \n\t\t\t// the shift mapping is going to be an identity shift with a jacobian of 1 so we don't need to do it\n\t\t\tshift_mapping_jacobian = get_jacobian_determinant_reconnection_shift(neighbor_reservoir.sample.sample_point, neighbor_reservoir.sample.sample_point_geometric_normal.unpack(), 
center_pixel_surface.shading_point, render_data.g_buffer.primary_hit_position[neighbor_pixel_index], render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\t\t}\n\n\t\tfloat target_function_at_center = 0.0f;\n\t\tbool do_neighbor_target_function_visibility = do_include_visibility_term_or_not<true>(render_data, neighbor_index);\n\t\tif (neighbor_reservoir.UCW > 0.0f)\n\t\t{\n\t\t\tif (is_center_pixel)\n\t\t\t\t// No need to evaluate the center sample at the center pixel, that's exactly\n\t\t\t\t// the target function of the center reservoir\n\t\t\t\ttarget_function_at_center = neighbor_reservoir.sample.target_function;\n\t\t\telse\n\t\t\t{\n\t\t\t\tif (do_neighbor_target_function_visibility)\n\t\t\t\t\ttarget_function_at_center = ReSTIR_GI_evaluate_target_function<KERNEL_OPTION_TRUE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t\t\telse\n\t\t\t\t\ttarget_function_at_center = ReSTIR_GI_evaluate_target_function<KERNEL_OPTION_FALSE>(render_data, neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\t\t\t}\n\t\t}\n\n#if ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_M\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_Z\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(neighbor_reservoir.M);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_LIKE\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, neighbor_reservoir.M);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.UCW,\n\t\t\tneighbor_reservoir.sample,\n\n\t\t\tcenter_pixel_surface, neighbor_index, center_pixel_coords, random_number_generator);\n#elif 
ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\tbool update_mc = center_pixel_reservoir.M > 0 && center_pixel_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function,\n\t\t\tcenter_pixel_reservoir.sample, center_pixel_reservoir.M, center_pixel_reservoir.sample.target_function,\n\t\t\tneighbor_reservoir,\n\n\t\t\tcenter_pixel_surface, target_function_at_center * shift_mapping_jacobian, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\tupdate_mc,/* resampling canonical */ is_center_pixel, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\tbool update_mc = center_pixel_reservoir.M > 0 && center_pixel_reservoir.UCW > 0.0f;\n\n\t\tfloat mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\n\t\t\tneighbor_reservoir.M, neighbor_reservoir.sample.target_function,\n\t\t\tcenter_pixel_reservoir.sample, center_pixel_reservoir.M, center_pixel_reservoir.sample.target_function,\n\t\t\tneighbor_reservoir,\n\n\t\t\tcenter_pixel_surface, target_function_at_center * shift_mapping_jacobian, neighbor_pixel_index, valid_neighbors_count, valid_neighbors_M_sum,\n\t\t\tupdate_mc,/* resampling canonical */ is_center_pixel, random_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t// Combining as in Alg. 
1 of the ReSTIR GI paper\n\t\tif (spatial_reuse_output_reservoir.combine_with(neighbor_reservoir, mis_weight, target_function_at_center, shift_mapping_jacobian, random_number_generator))\n\t\t\t// Only used with MIS-like MIS weights\n\t\t\tselected_neighbor = neighbor_index;\n\n\t\tspatial_reuse_output_reservoir.sanity_check(center_pixel_coords);\n\n\t\tReSTIR_optimal_visibility_sampling<true>(render_data,\n\t\t\tspatial_reuse_output_reservoir, center_pixel_reservoir,\n\t\t\tcenter_pixel_surface,\n\t\t\tneighbor_index, reused_neighbors_count,\n\t\t\trandom_number_generator);\n\t}\n\n\tfloat normalization_numerator = 1.0f;\n\tfloat normalization_denominator = 1.0f;\n\n\tReSTIRSpatialNormalizationWeight<ReSTIR_GI_BiasCorrectionWeights, /* Is ReSTIR GI */ true> normalization_function;\n#if ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_M\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface, center_pixel_coords, normalization_numerator, normalization_denominator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_Z\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.sample, spatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface,\n\t\tcenter_pixel_coords, normalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_LIKE\n\tnormalization_function.get_normalization(render_data,\n\t\tspatial_reuse_output_reservoir.sample, spatial_reuse_output_reservoir.weight_sum,\n\t\tcenter_pixel_surface, selected_neighbor,\n\t\tcenter_pixel_coords, normalization_numerator, normalization_denominator, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif 
ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\tspatial_reuse_output_reservoir.end_with_normalization(normalization_numerator, normalization_denominator);\n\tspatial_reuse_output_reservoir.sanity_check(center_pixel_coords);\n\n\t// Validating that the sample point resampled is visible from our visible point\n\t// TODO use a flag in the sample reservoir to indicate whether we are unoccluded or not\n\t//\t\t(we are always unoccluded if we resampled the canonical sample for example, in which case we don't have to do the validation)\n\t//\t\tIt would also probably be beneficial to have another kernel do the validation such that samples that don't need the validation\n\t//\t\t(resampled the canonical neighbor) don't do the validation at all\n\tReSTIR_GI_visibility_validation(render_data, spatial_reuse_output_reservoir, center_pixel_surface.shading_point, center_pixel_surface.primitive_index, random_number_generator);\n\n\t// M-capping so that we don't have to M-cap when reading reservoirs on the next frame\n\tif (render_data.render_settings.restir_gi_settings.m_cap > 0)\n\t\t// M-capping the spatial neighbor if an M-cap has been given\n\t\tspatial_reuse_output_reservoir.M = hippt::min(spatial_reuse_output_reservoir.M, render_data.render_settings.restir_gi_settings.m_cap);\n\n\trender_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs[center_pixel_index] = spatial_reuse_output_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/GI/TemporalReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RESTIR_GI_TEMPORAL_REUSE_H\n#define DEVICE_RESTIR_GI_TEMPORAL_REUSE_H \n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/ReSTIR/Surface.h\"\n#include \"Device/includes/ReSTIR/TemporalMISWeight.h\"\n#include \"Device/includes/ReSTIR/TemporalNormalizationWeight.h\"\n#include \"Device/includes/ReSTIR/Utils.h\"\n#include \"Device/includes/ReSTIR/UtilsTemporal.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n /** References:\n  *\n  * [1] [ReSTIR GI: Path Resampling for Real-Time Path Tracing] https://research.nvidia.com/publication/2021-06_restir-gi-path-resampling-real-time-path-tracing\n  * [2] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time] https://intro-to-restir.cwyman.org/\n  * [3] [A Gentle Introduction to ReSTIR: Path Reuse in Real-time - SIGGRAPH 2023 Presentation Video] https://dl.acm.org/doi/10.1145/3587423.3595511#sec-supp\n  */\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReSTIR_GI_TemporalReuse(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReSTIR_GI_TemporalReuse(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n\tconst uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n\tconst uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n\tif (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tuint32_t center_pixel_index = (x + y * render_data.render_settings.render_resolution.x);\n\n\tif (!render_data.aux_buffers.pixel_active[center_pixel_index] || render_data.g_buffer.first_hit_prim_index[center_pixel_index] == -1)\n\t{\n\t\t// Pixel inactive because of adaptive 
sampling, returning\n\t\t// Or also we don't have a primary hit\n\t\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs[center_pixel_index] = ReSTIRGIReservoir();\n\n\t\treturn;\n\t}\n\n\tif (render_data.render_settings.restir_gi_settings.common_temporal_pass.temporal_buffer_clear_requested)\n\t\t// We requested a temporal buffer clear for ReSTIR GI\n\t\trender_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs[center_pixel_index] = ReSTIRGIReservoir();\n\n\tif (render_data.render_settings.sample_number == 0 && render_data.render_settings.accumulate)\n\t\t// First frame of accumulation, no temporal history, just outputting the initial candidates\n\t\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer[center_pixel_index];\n\n\t// Initializing the random generator\n\tunsigned int seed;\n\tif (render_data.render_settings.freeze_random)\n\t\tseed = wang_hash(center_pixel_index + 1);\n\telse\n\t\tseed = wang_hash((center_pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\tXorshift32Generator random_number_generator(seed);\n\n\n\t// Surface data of the center pixel\n\tReSTIRSurface center_pixel_surface = get_pixel_surface(render_data, center_pixel_index, random_number_generator);\n\tint temporal_neighbor_pixel_index = find_temporal_neighbor_index<true>(render_data, center_pixel_surface.shading_point, ReSTIRSettingsHelper::get_normal_for_rejection_heuristic<true>(render_data, center_pixel_surface), center_pixel_index, random_number_generator).x;\n\tif (temporal_neighbor_pixel_index == -1 || render_data.render_settings.freeze_random)\n\t{\n\t\t// Temporal occlusion / disoccusion, temporal neighbor is invalid,\n\t\t// we're only going to resample the initial candidates so let's set that as\n\t\t// the output right away\n\t\t//\n\t\t// We're 
also 'disabling' temporal accumulation if the random is frozen otherwise\n\t\t// very strong correlations will creep up, corrupt the render and potentially invalidate\n\t\t// performance measurements (which we're probably trying to measure since we froze the random)\n\n\t\t// The output of this temporal pass is just the initial candidates reservoir\n\t\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer[center_pixel_index];\n\n\t\treturn;\n\t}\n\n\tif (temporal_neighbor_pixel_index < 0 || temporal_neighbor_pixel_index >= render_data.render_settings.render_resolution.x * render_data.render_settings.render_resolution.y)\n\t\treturn;\n\n\tReSTIRGIReservoir temporal_neighbor_reservoir = render_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs[temporal_neighbor_pixel_index];\n\tif (temporal_neighbor_reservoir.M == 0)\n\t{\n\t\t// No temporal neighbor, the output of this temporal pass is just the initial candidates reservoir\n\t\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs[center_pixel_index] = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer[center_pixel_index];\n\t\t\n\t\treturn;\n\t}\n\n\tReSTIRGIReservoir temporal_reuse_output_reservoir;\n\tReSTIRSurface temporal_neighbor_surface = get_pixel_surface(render_data, temporal_neighbor_pixel_index, render_data.render_settings.use_prev_frame_g_buffer(), random_number_generator);\n\n\tReSTIRTemporalResamplingMISWeight<ReSTIR_GI_BiasCorrectionWeights, true> mis_weight_function;\n\n\t// /* ------------------------------- */\n\t// Resampling the temporal neighbor\n\t// /* ------------------------------- */\n\n\tReSTIRGIReservoir initial_candidates_reservoir = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer[center_pixel_index];\n\tif 
(temporal_neighbor_reservoir.M > 0)\n\t{\n\t\tfloat target_function_at_center = 0.0f;\n\t\tif (temporal_neighbor_reservoir.UCW > 0.0f)\n\t\t\t// Only resampling if the temporal neighbor isn't empty\n\t\t\t//\n\t\t\t// If the temporal neighbor's reservoir is empty, then we do not get\n\t\t\t// inside that if() and the target function stays at 0.0f which eliminates\n\t\t\t// most of the computations afterwards\n\t\t\t//\n\t\t\t// Matching the visibility used here with the bias correction mode for ease \n\t\t\t// of use (and because manually handling the visibility in the target \n\t\t\t// function of the temporal reuse is tricky for the user to use in \n\t\t\t// combination with other parameters\n\t\t\ttarget_function_at_center = ReSTIR_GI_evaluate_target_function<ReSTIR_GI_BiasCorrectionUseVisibility>(render_data, temporal_neighbor_reservoir.sample, center_pixel_surface, random_number_generator);\n\n\t\tfloat shift_mapping_jacobian = 1.0f;\n\t\tif (temporal_neighbor_reservoir.UCW > 0.0f && !temporal_neighbor_reservoir.sample.is_envmap_path())\n\t\t{\n\t\t\t// Only attempting the shift if the neighbor reservoir is valid\n\t\t\t// \n\t\t\t// Also, if this is the last neighbor resample (meaning that it is the center pixel), \n\t\t\t// the shift mapping is going to be an identity shift with a jacobian of 1 so we don't need to do it\n\t\t\tshift_mapping_jacobian = get_jacobian_determinant_reconnection_shift(temporal_neighbor_reservoir.sample.sample_point, temporal_neighbor_reservoir.sample.sample_point_geometric_normal.unpack(), center_pixel_surface.shading_point, render_data.g_buffer_prev_frame.primary_hit_position[temporal_neighbor_pixel_index], render_data.render_settings.restir_gi_settings.get_jacobian_heuristic_threshold());\n\t\t}\n\n#if ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_M\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir);\n#elif 
ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_Z\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(temporal_neighbor_reservoir);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_LIKE\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, temporal_neighbor_reservoir);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, temporal_neighbor_reservoir,\n\t\t\tinitial_candidates_reservoir, temporal_neighbor_surface, center_pixel_surface,\n\t\t\ttemporal_neighbor_reservoir.M, TEMPORAL_NEIGHBOR_ID, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\t\t\ttarget_function_at_center * shift_mapping_jacobian, TEMPORAL_NEIGHBOR_ID,\n\t\t\trandom_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\t\tfloat temporal_neighbor_resampling_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\n\t\t\ttarget_function_at_center * shift_mapping_jacobian, TEMPORAL_NEIGHBOR_ID,\n\t\t\trandom_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\t\t// Combining as in Alg. 
6 of the paper\n\t\ttemporal_reuse_output_reservoir.combine_with(temporal_neighbor_reservoir, temporal_neighbor_resampling_mis_weight, target_function_at_center, shift_mapping_jacobian, random_number_generator);\n\t\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\t}\n\n\t// /* ------------------------------- */\n\t// Resampling the initial candidates\n\t// /* ------------------------------- */\n\n#if ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_M\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(initial_candidates_reservoir);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_1_OVER_Z\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(initial_candidates_reservoir);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_LIKE\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, initial_candidates_reservoir);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, initial_candidates_reservoir,\n\t\tinitial_candidates_reservoir, temporal_neighbor_surface, center_pixel_surface,\n\t\ttemporal_neighbor_reservoir.M, INITIAL_CANDIDATES_ID, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data, \n\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\tcenter_pixel_surface, temporal_neighbor_surface, \n\n\t\t/* unused */ 0.0f, INITIAL_CANDIDATES_ID, random_number_generator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_GI_BiasCorrectionWeights == 
RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tfloat initial_candidates_mis_weight = mis_weight_function.get_resampling_MIS_weight(render_data,\n\t\ttemporal_neighbor_reservoir, initial_candidates_reservoir,\n\t\tcenter_pixel_surface, temporal_neighbor_surface,\n\n\t\t/* unused */ 0.0f, INITIAL_CANDIDATES_ID,\n\t\trandom_number_generator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\ttemporal_reuse_output_reservoir.combine_with(initial_candidates_reservoir, initial_candidates_mis_weight, initial_candidates_reservoir.sample.target_function, /* jacobian is 1 when reusing at the exact same spot */ 1.0f, random_number_generator);\n\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\n\tfloat normalization_numerator = 1.0f;\n\tfloat normalization_denominator = 1.0f;\n\n\tReSTIRTemporalNormalizationWeight<ReSTIR_GI_BiasCorrectionWeights, true> normalization_function;\n#if ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#elif ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO || ReSTIR_GI_BiasCorrectionWeights == RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO\n\tnormalization_function.get_normalization(normalization_numerator, normalization_denominator);\n#else\n#error \"Unsupported bias correction mode\"\n#endif\n\n\ttemporal_reuse_output_reservoir.end_with_normalization(normalization_numerator, 
normalization_denominator);\n\ttemporal_reuse_output_reservoir.sanity_check(make_int2(x, y));\n\n\t// M-capping so that we don't have to M-cap when reading reservoirs on the next frame\n\tif (render_data.render_settings.restir_gi_settings.m_cap > 0)\n\t\t// M-capping the temporal neighbor if an M-cap has been given\n\t\ttemporal_reuse_output_reservoir.M = hippt::min(temporal_reuse_output_reservoir.M, render_data.render_settings.restir_gi_settings.m_cap);\n\n\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs[center_pixel_index] = temporal_reuse_output_reservoir;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/GridFillTemporalReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_GRID_FILL_H\n#define DEVICE_KERNELS_REGIR_GRID_FILL_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/ReGIR/Settings.h\"\n#include \"Device/includes/ReSTIR/ReGIR/TargetFunction.h\"\n\n#include \"HostDeviceCommon/KernelOptions/ReGIROptions.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\nHIPRT_DEVICE LightSampleInformation sample_one_presampled_light(const HIPRTRenderData& render_data, const ReGIRSettings& regir_settings, \n    unsigned int hash_grid_cell_index, int reservoir_index_in_cell, bool primary_hit,\n    Xorshift32Generator& rng)\n{\n    float presampled_light_pdf;\n    ReGIRPresampledLight light_sample = regir_settings.sample_one_presampled_light(hash_grid_cell_index, reservoir_index_in_cell, primary_hit, presampled_light_pdf, rng);\n\n    LightSampleInformation full_sample_information;\n    full_sample_information.emissive_triangle_index = light_sample.emissive_triangle_index;\n    full_sample_information.light_source_normal = light_sample.normal.unpack();\n     full_sample_information.light_area = light_sample.triangle_area;\n    //full_sample_information.emission = light_sample.emission;\n    full_sample_information.emission = render_data.buffers.materials_buffer.get_emission(render_data.buffers.material_indices[light_sample.emissive_triangle_index]);\n    full_sample_information.point_on_light = light_sample.point_on_light;\n\n    // PDF of that point on that triangle\n    full_sample_information.area_measure_pdf = 1.0f / full_sample_information.light_area;\n#if ReGIR_GridFillLightSamplingBaseStrategy == LSS_BASE_UNIFORM\n    // PDF of sampling that triangle uniformly\n    full_sample_information.area_measure_pdf *= 1.0f / 
render_data.buffers.emissive_triangles_count;\n#elif ReGIR_GridFillLightSamplingBaseStrategy == LSS_BASE_POWER\n    // PDF of sampling that triangle according to its power\n    full_sample_information.area_measure_pdf *= (full_sample_information.emission.luminance() * full_sample_information.light_area) / render_data.buffers.emissives_power_alias_table.sum_elements;\n#endif\n\n    return full_sample_information;\n}\n\ntemplate <bool accumulatePreIntegration>\nHIPRT_DEVICE ReGIRReservoir grid_fill(const HIPRTRenderData& render_data, const ReGIRSettings& regir_settings,\n    unsigned int hash_grid_cell_index, int reservoir_index_in_cell, const ReGIRGridFillSurface& surface, bool primary_hit,\n    Xorshift32Generator& rng)\n{\n    ReGIRReservoir grid_fill_reservoir;\n\n    bool reservoir_is_canonical = regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell);\n\n    int retries = 0;\n    for (int light_sample_index = 0; light_sample_index < regir_settings.get_grid_fill_settings(primary_hit).light_sample_count_per_cell_reservoir; light_sample_index++)\n    {\n        LightSampleInformation light_sample;\n\n        if constexpr (ReGIR_GridFillDoLightPresampling == KERNEL_OPTION_TRUE && !accumulatePreIntegration)\n            // Never using presampling lights for pre integration because pre integration needs\n            // different samples to pre integrate properly and using presampled lights severely restricts\n            // the number of different samples we have available\n            light_sample = sample_one_presampled_light(render_data, regir_settings, hash_grid_cell_index, reservoir_index_in_cell, primary_hit, rng);\n        else\n            light_sample = sample_one_emissive_triangle<ReGIR_GridFillLightSamplingBaseStrategy>(render_data, rng);\n\n        if (light_sample.emissive_triangle_index == -1)\n            continue;\n\n        float target_function;\n        if (reservoir_is_canonical)\n           
 // This reservoir is canonical, simple target function to keep it canonical (no visibility / cosine terms)\n            target_function = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, \n                surface, primary_hit,\n                light_sample.emission, light_sample.light_source_normal, light_sample.point_on_light, rng);\n        else\n            target_function = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, \n                surface, primary_hit,\n                light_sample.emission, light_sample.light_source_normal, light_sample.point_on_light, rng);\n\n        float mis_weight = 1.0f / regir_settings.get_grid_fill_settings(primary_hit).light_sample_count_per_cell_reservoir;\n        float source_pdf = light_sample.area_measure_pdf;\n\n        grid_fill_reservoir.stream_sample(mis_weight, target_function, source_pdf, light_sample, rng);\n    }\n\n    return grid_fill_reservoir;\n}\n\ntemplate <bool accumulatePreIntegration>\nHIPRT_DEVICE void grid_fill_pre_integration_accumulation(HIPRTRenderData& render_data, const ReGIRReservoir& output_reservoir, bool reservoir_is_canonical, unsigned int hash_grid_cell_index, bool primary_hit)\n{\n    if constexpr (accumulatePreIntegration)\n    {\n        ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n        // Only doing the pre integration on the first sample of the frame\n        // and if we don't have spatial reuse. 
If we have the spatial reuse, it's\n        // the spatial reuse pass that will do the pre integration accumulation\n        if (!regir_settings.spatial_reuse.do_spatial_reuse)\n        {\n            float normalization;\n            if (reservoir_is_canonical)\n                normalization = regir_settings.get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell() * render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n            else\n                normalization = regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell() * render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n            float integration_increment = hippt::max(0.0f, output_reservoir.sample.target_function * output_reservoir.UCW) / normalization;\n\n            if (reservoir_is_canonical)\n                hippt::atomic_fetch_add(&regir_settings.get_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index], integration_increment);\n            else\n                hippt::atomic_fetch_add(&regir_settings.get_non_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index], integration_increment);\n        }\n    }\n}\n\n/** \n * This kernel is in charge of resetting (when necessary) and filling the ReGIR grid.\n * \n * This kernel also does the temporal reuse if enabled.\n */\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) ReGIR_Grid_Fill_Temporal_Reuse(HIPRTRenderData render_data, ReGIRHashGridSoADevice output_reservoirs_grid, unsigned int number_of_cells_alive, bool primary_hit)\n#else\ntemplate <bool accumulatePreIntegration>\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Grid_Fill_Temporal_Reuse(HIPRTRenderData render_data, ReGIRHashGridSoADevice output_reservoirs_grid, int thread_index, unsigned int number_of_cells_alive, bool primary_hit)\n#endif\n{\n    if (render_data.buffers.emissive_triangles_count == 0)\n        // No initial candidates to sample since no lights\n   
     return;\n\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n#ifdef __KERNELCC__\n    uint32_t thread_index = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t thread_count = gridDim.x * blockDim.x;\n#endif\n\n    while (thread_index < regir_settings.get_number_of_reservoirs_per_cell(primary_hit) * number_of_cells_alive)\n    {\n        int reservoir_index = thread_index;\n        \n        unsigned int reservoir_index_in_cell = reservoir_index % regir_settings.get_number_of_reservoirs_per_cell(primary_hit);\n        unsigned int cell_alive_index = reservoir_index / regir_settings.get_number_of_reservoirs_per_cell(primary_hit);\n        // If all cells are alive, the cell index is straightforward\n        //\n        // Not all cells are alive, what we have is cell_alive_index which is the index of the cell in the alive list\n        // so we can fetch the index of the cell in the grid cells alive list with that cell_alive_index\n        unsigned int hash_grid_cell_index = regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_list[cell_alive_index];\n        unsigned int reservoir_index_in_grid = hash_grid_cell_index * regir_settings.get_number_of_reservoirs_per_cell(primary_hit) + reservoir_index_in_cell;\n        \n        unsigned int seed;\n        if (render_data.render_settings.freeze_random)\n            seed = wang_hash(reservoir_index_in_grid + 1);\n        else\n            seed = wang_hash((reservoir_index_in_grid + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n        \n        Xorshift32Generator random_number_generator(seed);\n        ReGIRReservoir output_reservoir;\n\n        ReGIRGridFillSurface cell_surface = ReGIR_get_cell_surface(render_data, hash_grid_cell_index, primary_hit);\n\n        // Grid fill\n#ifdef __KERNELCC__\n        constexpr bool ACCUMULATE_PRE_INTEGRATION_OPTION = ReGIR_GridFillSpatialReuse_AccumulatePreIntegration;\n#else\n        
constexpr bool ACCUMULATE_PRE_INTEGRATION_OPTION = accumulatePreIntegration;\n#endif\n\n        output_reservoir = grid_fill<ACCUMULATE_PRE_INTEGRATION_OPTION>(render_data, regir_settings, hash_grid_cell_index, reservoir_index_in_cell, cell_surface, primary_hit, random_number_generator);\n        \n        // Normalizing the reservoir\n        output_reservoir.finalize_resampling(1.0f, 1.0f);\n        \n        regir_settings.store_reservoir_custom_buffer_opt(output_reservoirs_grid, output_reservoir, hash_grid_cell_index, reservoir_index_in_cell);\n\n        grid_fill_pre_integration_accumulation<ACCUMULATE_PRE_INTEGRATION_OPTION>(render_data, output_reservoir, regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell), hash_grid_cell_index, primary_hit);\n\n#ifndef __KERNELCC__\n        // We're dispatching exactly one thread per reservoir to compute on the CPU so no need\n        // for the work queue style of things that is only needed on the GPU, we can just exit here\n        break;\n#else\n        // We need to compute the next reservoir index for the next iteration\n        thread_index += thread_count;\n#endif\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/GridPrepopulate.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_REGIR_GRID_PREPOPULATE_H\n#define KERNELS_REGIR_GRID_PREPOPULATE_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReGIR_Grid_Prepopulate(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Grid_Prepopulate(HIPRTRenderData render_data, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = (blockIdx.x * blockDim.x + threadIdx.x) * ReGIR_GridPrepopulationResolutionDownscale;\n    const uint32_t y = (blockIdx.y * blockDim.y + threadIdx.y) * ReGIR_GridPrepopulationResolutionDownscale;\n#endif\n    if (x >= render_data.render_settings.render_resolution.x || y >= render_data.render_settings.render_resolution.y)\n        return;\n\n    uint32_t pixel_index = x + y * render_data.render_settings.render_resolution.x;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n    Xorshift32Generator random_number_generator(seed);\n\n    // Direction to the center of the pixel\n    float x_ray_point_direction = (x + 0.5f);\n    float y_ray_point_direction = (y + 0.5f);\n    if (render_data.current_camera.do_jittering)\n    {\n        // Jitter randomly around the center\n        x_ray_point_direction += random_number_generator() - 0.5f;\n        y_ray_point_direction += random_number_generator() - 0.5f;\n    }\n\n    hiprtRay camera_ray = 
render_data.current_camera.get_camera_ray(x_ray_point_direction, y_ray_point_direction, render_data.render_settings.render_resolution);\n    RayPayload ray_payload;\n\n    HitInfo closest_hit_info;\n    bool intersection_found = trace_main_path_ray(render_data, camera_ray, ray_payload, closest_hit_info, /* camera ray = no previous primitive hit */ -1, /* bounce. Always 0 for camera rays*/ 0, random_number_generator);\n\n    if (!intersection_found)\n        return;\n\n    ReGIR_update_representative_data(render_data, closest_hit_info.inter_point, closest_hit_info.geometric_normal, render_data.current_camera, closest_hit_info.primitive_index, true, ray_payload.material);\n\n    for (int& bounce = ray_payload.bounce; bounce < render_data.render_settings.nb_bounces + 1; bounce++)\n    {\n        if (ray_payload.next_ray_state != RayState::MISSED)\n        {\n            if (bounce > 0)\n                intersection_found = path_tracing_find_indirect_bounce_intersection(render_data, camera_ray, ray_payload, closest_hit_info, random_number_generator);\n\n            if (intersection_found)\n            {\n                if (bounce > 0)\n                {\n                    bool ReGIR_primary_hit = render_data.render_settings.regir_settings.compute_is_primary_hit(ray_payload);\n\n                    // Storing data for ReGIR representative points\n                    ReGIR_update_representative_data(render_data, closest_hit_info.inter_point, closest_hit_info.geometric_normal, render_data.current_camera, closest_hit_info.primitive_index, ReGIR_primary_hit, ray_payload.material);\n                }\n\n                BSDFIncidentLightInfo sampled_light_info; // This variable is never used, this is just for debugging on the CPU so that we know what the BSDF sampled\n                bool valid_indirect_bounce = path_tracing_compute_next_indirect_bounce<true>(render_data, ray_payload, closest_hit_info, -camera_ray.direction, camera_ray, random_number_generator, 
&sampled_light_info);\n                if (!valid_indirect_bounce)\n                    // Bad BSDF sample (under the surface), killed by russian roulette, ...\n                    break;\n            }\n            else\n                return;\n        }\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/LightPresampling.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_LIGHT_PRESAMPLING_H\n#define DEVICE_KERNELS_REGIR_LIGHT_PRESAMPLING_H\n\n#include \"Device/includes/LightSampling/LightUtils.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n /**\n  * This kernel presamples a set of lights: each thread samples one emissive triangle\n  * of the scene and stores it in the ReGIR presampled lights buffer.\n  *\n  * The grid fill pass can then pick its light candidates from this reduced set of\n  * presampled lights instead of sampling the whole scene every time\n  */\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) ReGIR_Light_Presampling(HIPRTRenderData render_data)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Light_Presampling(HIPRTRenderData render_data, int thread_index)\n#endif\n{\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n#ifdef __KERNELCC__\n    const uint32_t thread_index = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n\n    if (thread_index >= render_data.render_settings.regir_settings.presampled_lights.get_presampled_light_count())\n        return;\n\n    Xorshift32Generator rng(wang_hash(thread_index ^ render_data.random_number));\n\n    LightSampleInformation light_sample = sample_one_emissive_triangle<ReGIR_GridFillLightSamplingBaseStrategy>(render_data, rng);\n\n    ReGIRPresampledLight presampled_light;\n    presampled_light.emissive_triangle_index = light_sample.emissive_triangle_index;\n    presampled_light.point_on_light = light_sample.point_on_light;\n    presampled_light.normal.pack(light_sample.light_source_normal);\n    presampled_light.triangle_area = light_sample.light_area;\n    presampled_light.emission = light_sample.emission;\n\n    render_data.render_settings.regir_settings.presampled_lights.store_one_presampled_light(presampled_light, thread_index);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/PreIntegration.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_REGIR_PRE_INTEGRATION_H\n#define KERNELS_REGIR_PRE_INTEGRATION_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/PathTracing.h\"\n#include \"Device/includes/RayPayload.h\"\n#include \"Device/includes/SanityCheck.h\"\n\n#include \"HostDeviceCommon/Xorshift.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) __launch_bounds__(64) ReGIR_Pre_integration(HIPRTRenderData render_data, unsigned int number_of_cells_alive, bool primary_hit)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Pre_integration(HIPRTRenderData render_data, unsigned int number_of_cells_alive, bool primary_hit, int thread_index)\n#endif\n{\n    if (render_data.buffers.emissive_triangles_count == 0)\n        // No initial candidates to sample since no lights\n        return;\n\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n#ifdef __KERNELCC__\n    uint32_t thread_index = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t thread_count = gridDim.x * blockDim.x;\n#endif\n\n    while (thread_index < number_of_cells_alive)\n    {\n        int cell_alive_index = thread_index;\n\n        unsigned int hash_grid_cell_index = number_of_cells_alive == regir_settings.get_total_number_of_cells_per_grid(primary_hit) ? 
cell_alive_index : regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_list[cell_alive_index];\n\n        unsigned int seed;\n        if (render_data.render_settings.freeze_random)\n            seed = wang_hash(hash_grid_cell_index + 1);\n        else\n            seed = wang_hash((hash_grid_cell_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n        Xorshift32Generator random_number_generator(seed);\n\n\t\tReGIRGridFillSurface surface = ReGIR_get_cell_surface(render_data, hash_grid_cell_index, primary_hit);\n\n\t\t// This kernel always uses a Lambertian BRDF where the view direction is not used so it can be set to zero\n        float3 view_direction = make_float3(0.0f, 0.0f, 0.0f);\n\n        float non_canonical_cell_integration_sum = 0.0f;\n        for (int i = 0; i < regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell(); i++)\n        {\n            bool invalid_sample = false;\n            ReGIRReservoir non_canonical_reservoir = regir_settings.get_cell_non_canonical_reservoir_from_index(hash_grid_cell_index, primary_hit, i, &invalid_sample);\n            if (invalid_sample || non_canonical_reservoir.UCW <= 0.0f)\n                continue;\n\n            LightSampleInformation light_sample;\n            light_sample.area_measure_pdf = 1.0f / non_canonical_reservoir.UCW;\n            light_sample.emission = get_emission_of_triangle_from_index(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n            light_sample.emissive_triangle_index = non_canonical_reservoir.sample.emissive_triangle_index;\n            light_sample.light_area = triangle_area(render_data, non_canonical_reservoir.sample.emissive_triangle_index);\n            light_sample.light_source_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, non_canonical_reservoir.sample.emissive_triangle_index));\n            light_sample.point_on_light = 
non_canonical_reservoir.sample.point_on_light;\n\n            if (light_sample.area_measure_pdf <= 0.0f)\n                // Can happen for very small triangles\n                continue;\n\n            float non_canonical_target_function = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, \n                surface, primary_hit,\n                light_sample.emission, light_sample.light_source_normal, light_sample.point_on_light, random_number_generator);\n\n            if (non_canonical_target_function <= 0.0f)\n                continue;\n\n            non_canonical_cell_integration_sum += non_canonical_target_function * non_canonical_reservoir.UCW;\n        }\n\n        regir_settings.get_non_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index] += non_canonical_cell_integration_sum / (regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell()) / render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n\n        float canonical_cell_integration_sum = 0.0f;\n        for (int i = 0; i < regir_settings.get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell(); i++)\n        {\n            bool invalid_sample = false;\n            ReGIRReservoir canonical_reservoir = regir_settings.get_cell_canonical_reservoir_from_index(hash_grid_cell_index, primary_hit, i, &invalid_sample);\n            if (invalid_sample || canonical_reservoir.UCW <= 0.0f)\n                continue;\n\n            LightSampleInformation light_sample;\n            light_sample.area_measure_pdf = 1.0f / canonical_reservoir.UCW;\n            light_sample.emission = get_emission_of_triangle_from_index(render_data, canonical_reservoir.sample.emissive_triangle_index);\n            light_sample.emissive_triangle_index = canonical_reservoir.sample.emissive_triangle_index;\n            light_sample.light_area = triangle_area(render_data, canonical_reservoir.sample.emissive_triangle_index);\n           
 light_sample.light_source_normal = hippt::normalize(get_triangle_normal_not_normalized(render_data, canonical_reservoir.sample.emissive_triangle_index));\n            light_sample.point_on_light = canonical_reservoir.sample.point_on_light;\n\n            if (light_sample.area_measure_pdf <= 0.0f)\n                // Can happen for very small triangles\n                continue;\n\n            float canonical_target_function = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, \n                surface, primary_hit,\n                light_sample.emission, light_sample.light_source_normal, light_sample.point_on_light, random_number_generator);\n\n            if (canonical_target_function <= 0.0f)\n                continue;\n\n            canonical_cell_integration_sum += canonical_target_function * canonical_reservoir.UCW;\n        }\n\n        regir_settings.get_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index] += canonical_cell_integration_sum / (regir_settings.get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell()) / render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n\n#ifndef __KERNELCC__\n        // We're dispatching exactly one thread per reservoir to compute on the CPU so no need\n        // for the work queue style of things that is only needed on the GPU, we can just exit here\n        break;\n#else\n        // We need to compute the next reservoir index for the next iteration\n        thread_index += thread_count;\n#endif\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/Rehash.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_REHASH_KERNEL_H\n#define DEVICE_KERNELS_REGIR_REHASH_KERNEL_H\n\n#include \"Device/includes/ReSTIR/ReGIR/Settings.h\"\n\n/**\n * This kernel inserts the keys of the input hash table into the output hash table\n *\n * This is used when the hash table has been resized and we need to re-insert the keys\n * of the old (smaller) hash table into the new (larger) hash table\n */\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) ReGIR_Rehash(\n    HIPRTCamera current_camera,\n\n    ReGIRHashGrid new_hash_grid,\n    ReGIRHashGridSoADevice new_hash_grid_soa, ReGIRHashCellDataSoADevice new_hash_cell_data,\n\n    ReGIRHashCellDataSoADevice old_hash_cell_data,\n    unsigned int* old_grid_cells_alive_list,\n    unsigned int old_cell_count, \n    \n    bool primary_hit)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Rehash(\n    HIPRTCamera current_camera,\n\n    ReGIRHashGrid new_hash_grid,\n    ReGIRHashGridSoADevice new_hash_grid_soa, ReGIRHashCellDataSoADevice new_hash_cell_data,\n\n    ReGIRHashCellDataSoADevice old_hash_cell_data,\n    unsigned int* old_grid_cells_alive_list, \n    unsigned int old_cell_count, \n    \n    bool primary_hit,\n\n    unsigned int cell_index\n)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t cell_index = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n\n    if (cell_index >= old_cell_count)\n        return;\n\n    unsigned int cell_alive_index = old_grid_cells_alive_list[cell_index];\n\n    float3 world_position = old_hash_cell_data.world_points[cell_alive_index];\n    float3 shading_normal = old_hash_cell_data.world_normals[cell_alive_index].unpack();\n    int primitive_index = old_hash_cell_data.hit_primitive[cell_alive_index];\n\n\tDeviceUnpackedEffectiveMaterial material;\n\tmaterial.roughness = old_hash_cell_data.roughness[cell_alive_index] / 
255.0f;\n\tmaterial.metallic = old_hash_cell_data.metallic[cell_alive_index] / 255.0f;\n\tmaterial.specular = old_hash_cell_data.specular[cell_alive_index] / 255.0f;\n\n    ReGIRSettings::insert_hash_cell_data_static(\n        new_hash_grid, new_hash_grid_soa, new_hash_cell_data,\n        world_position, shading_normal, current_camera, primitive_index, primary_hit, material);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/SpatialReuse.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_SPATIAL_REUSE_H\n#define DEVICE_KERNELS_REGIR_SPATIAL_REUSE_H\n \n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/LightSampling/LightUtils.h\"\n#include \"Device/includes/ReSTIR/ReGIR/TargetFunction.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#ifndef __KERNELCC__\n#include \"omp.h\"\n#endif\n\nHIPRT_DEVICE unsigned int get_random_neighbor_hash_grid_cell_index_with_retries(HIPRTRenderData& render_data, \n    bool primary_hit, float3 point_in_center_cell, float3 center_cell_normal, float center_cell_roughness,\n    Xorshift32Generator& spatial_neighbor_rng)\n{\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n    unsigned int neighbor_hash_grid_cell_index_in_grid;\n    bool neighbor_invalid = true;\n\n    int retry = 0;\n    while (retry < regir_settings.spatial_reuse.retries_per_neighbor && neighbor_invalid)\n    {\n        float3 random_neighbor = make_float3(spatial_neighbor_rng(), spatial_neighbor_rng(), spatial_neighbor_rng());\n\n        float3 offset_float_radius_1 = random_neighbor * 2.0f - 1.0f;\n        float3 offset_float_radius = offset_float_radius_1 * regir_settings.spatial_reuse.spatial_reuse_radius;\n        float3 offset = make_float3(roundf(offset_float_radius.x), roundf(offset_float_radius.y), roundf(offset_float_radius.z));\n        float3 point_in_neighbor_cell = point_in_center_cell + offset * regir_settings.get_cell_size(point_in_center_cell, render_data.current_camera, center_cell_roughness, primary_hit);\n        \n        neighbor_hash_grid_cell_index_in_grid = regir_settings.get_hash_grid_cell_index_from_world_pos(point_in_neighbor_cell, center_cell_normal, render_data.current_camera, center_cell_roughness, primary_hit);\n        if (neighbor_hash_grid_cell_index_in_grid != 
HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX && regir_settings.get_hash_cell_data_soa(primary_hit).grid_cell_alive[neighbor_hash_grid_cell_index_in_grid])\n\t\t\t// Neighbor is inside of the grid and alive, we can use it\n            neighbor_invalid = false;\n\n        retry++;\n    }\n\n    if (neighbor_invalid)\n        // We couldn't find a good neighbor\n        return HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX;\n\n    return neighbor_hash_grid_cell_index_in_grid;\n}\n\nHIPRT_DEVICE ReGIRReservoir spatial_reuse(HIPRTRenderData& render_data,\n    ReGIRHashGridSoADevice& input_reservoirs,\n    int reservoir_index_in_cell, int hash_grid_cell_index, \n    bool primary_hit, float3 center_cell_point, float3 center_cell_normal, float center_cell_roughness,\n    Xorshift32Generator& spatial_neighbor_rng, Xorshift32Generator& random_number_generator)\n{\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n    ReGIRReservoir output_reservoir;\n\n    int selected = 0;\n    for (int neighbor_index = 0; neighbor_index < regir_settings.spatial_reuse.spatial_neighbor_count + 1; neighbor_index++)\n    {\n        bool is_center_cell = neighbor_index == regir_settings.spatial_reuse.spatial_neighbor_count;\n\n        // Getting a random neighbor and retrying a certain amount of times\n        // in case the neighbor that we picked was out of the grid, in a dead cell, ...\n        //\n        // This is to have more chance to get a reusable neighbor --> more reuse --> less variance\n        float3 random_neighbor;\n        int neighbor_hash_grid_cell_index_in_grid;\n\n        if (is_center_cell)\n            neighbor_hash_grid_cell_index_in_grid = hash_grid_cell_index;\n        else\n        {\n            neighbor_hash_grid_cell_index_in_grid = get_random_neighbor_hash_grid_cell_index_with_retries(render_data, primary_hit, center_cell_point, center_cell_normal, center_cell_roughness, spatial_neighbor_rng);\n            if 
(neighbor_hash_grid_cell_index_in_grid == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n                // Could not find a valid neighbor\n                continue;\n        }\n\n        for (int neighbor_reuse = 0; neighbor_reuse < regir_settings.spatial_reuse.reuse_per_neighbor_count; neighbor_reuse++)\n        {\n            // Picking a random reservoir in the neighbor cell\n\t\t\t// If our reservoir is canonical, we pick a random canonical reservoir in the neighbor cell.\n            // Same for non-canonical\n            int random_reservoir_index_in_cell;\n            if (regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell))\n                random_reservoir_index_in_cell = random_number_generator() * regir_settings.get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell() + regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n            else\n                random_reservoir_index_in_cell = random_number_generator() * regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell();\n\n            ReGIRReservoir neighbor_reservoir = regir_settings.get_reservoir_from_grid_cell_index(input_reservoirs, neighbor_hash_grid_cell_index_in_grid, random_reservoir_index_in_cell);\n            if (neighbor_reservoir.UCW <= 0.0f)\n                continue;\n\n            ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, neighbor_reservoir.sample.emissive_triangle_index);\n            float3 point_on_light = neighbor_reservoir.sample.point_on_light;\n            float3 light_source_normal = get_triangle_normal_not_normalized(render_data, neighbor_reservoir.sample.emissive_triangle_index);\n            float light_source_area = hippt::length(light_source_normal) * 0.5f;\n            light_source_normal /= light_source_area * 2.0f;\n\n            float target_function_at_center;\n            if 
(regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell))\n                target_function_at_center = ReGIR_grid_fill_evaluate_canonical_target_function(render_data, hash_grid_cell_index, primary_hit,\n                    emission, light_source_normal, point_on_light, random_number_generator);\n            else\n                target_function_at_center = ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, hash_grid_cell_index, primary_hit,\n                    emission, light_source_normal, point_on_light, random_number_generator);\n\n            // MIS weight is 1.0f because we're going to normalize at the end instead of during the resampling\n            float mis_weight = 1.0f;\n            output_reservoir.stream_reservoir(mis_weight, target_function_at_center, neighbor_reservoir, random_number_generator);\n        }\n    }\n\n    return output_reservoir;\n}\n\nHIPRT_DEVICE int spatial_reuse_mis_weight(HIPRTRenderData& render_data, const ReGIRReservoir& output_reservoir,\n    int reservoir_index_in_cell, int hash_grid_cell_index, \n    bool primary_hit, float3 center_cell_point, float3 center_cell_normal, float center_cell_roughness,\n    Xorshift32Generator& spatial_neighbor_rng, Xorshift32Generator& random_number_generator)\n{\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n    // Now counting the number of neighbors that could have produced this sample for the MIS weight\n    // This is 1/Z MIS weights\n    int valid_neighbor_count = 0;\n\n    if (output_reservoir.weight_sum > 0.0f)\n    {\n        ColorRGB32F emission = get_emission_of_triangle_from_index(render_data, output_reservoir.sample.emissive_triangle_index);\n        float3 point_on_light = output_reservoir.sample.point_on_light;\n        float3 light_source_normal = get_triangle_normal_not_normalized(render_data, output_reservoir.sample.emissive_triangle_index);\n        float 
light_source_area = hippt::length(light_source_normal) * 0.5f;\n        light_source_normal /= light_source_area * 2.0f;\n\n        for (int neighbor_index = 0; neighbor_index < regir_settings.spatial_reuse.spatial_neighbor_count + 1; neighbor_index++)\n        {\n            bool is_center_cell = neighbor_index == regir_settings.spatial_reuse.spatial_neighbor_count;\n\n            int neighbor_hash_grid_cell_index_in_grid;\n\n            if (is_center_cell)\n                neighbor_hash_grid_cell_index_in_grid = hash_grid_cell_index;\n            else\n            {\n                neighbor_hash_grid_cell_index_in_grid = get_random_neighbor_hash_grid_cell_index_with_retries(render_data, primary_hit, center_cell_point, center_cell_normal, center_cell_roughness, spatial_neighbor_rng);\n                if (neighbor_hash_grid_cell_index_in_grid == HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX)\n                    // Could not find a valid neighbor\n                    continue;\n            }\n\n            if (regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell))\n                // A canonical reservoir can always be produced by anyone\n                valid_neighbor_count += regir_settings.spatial_reuse.reuse_per_neighbor_count;\n            else\n            {\n                // Non-canonical sample, we need to count how many neighbors could have produced it\n                if (ReGIR_grid_fill_evaluate_non_canonical_target_function(render_data, neighbor_hash_grid_cell_index_in_grid, primary_hit, emission, light_source_normal, point_on_light, random_number_generator) > 0.0f)\n                    valid_neighbor_count += regir_settings.spatial_reuse.reuse_per_neighbor_count;\n            }\n        }\n    }\n\n    return valid_neighbor_count;\n}\n\ntemplate <bool accumulatePreIntegration>\nHIPRT_DEVICE HIPRT_INLINE void spatial_reuse_pre_integration_accumulation(HIPRTRenderData& render_data, const 
ReGIRReservoir& output_reservoir, bool reservoir_is_canonical, unsigned int hash_grid_cell_index, bool primary_hit)\n{\n    if constexpr (accumulatePreIntegration)\n    {\n        if (render_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_index == render_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_count - 1)\n        {\n\t\t\t// Only accumulating pre-integration factors on the last spatial reuse pass\n\n            ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n            // Only doing the pre integration on the first sample of the frame\n            float normalization;\n            if (reservoir_is_canonical)\n                normalization = regir_settings.get_grid_fill_settings(primary_hit).get_canonical_reservoir_count_per_cell() * render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n            else\n                normalization = regir_settings.get_grid_fill_settings(primary_hit).get_non_canonical_reservoir_count_per_cell() * render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS;\n            float integration_increment = hippt::max(0.0f, output_reservoir.sample.target_function * output_reservoir.UCW) / normalization;\n\n            if (reservoir_is_canonical)\n                hippt::atomic_fetch_add(&regir_settings.get_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index], integration_increment);\n            else\n                hippt::atomic_fetch_add(&regir_settings.get_non_canonical_pre_integration_factor_buffer(primary_hit)[hash_grid_cell_index], integration_increment);\n        }\n    }\n}\n\n /** \n  * This kernel is in charge of the spatial reuse on the ReGIR grid.\n  * \n  * Each cell reuses from random cells adjacent to it\n  */\n #ifdef __KERNELCC__\n GLOBAL_KERNEL_SIGNATURE(void) ReGIR_Spatial_Reuse(HIPRTRenderData render_data, \n     ReGIRHashGridSoADevice input_reservoirs_grid, ReGIRHashGridSoADevice 
output_reservoirs_grid, ReGIRHashCellDataSoADevice output_reservoirs_hash_cell_data,\n     unsigned int number_of_cells_alive, bool primary_hit)\n #else\ntemplate <bool accumulatePreIntegration>\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Spatial_Reuse(HIPRTRenderData render_data,\n    ReGIRHashGridSoADevice input_reservoirs_grid, ReGIRHashGridSoADevice output_reservoirs_grid, ReGIRHashCellDataSoADevice output_reservoirs_hash_cell_data,\n    unsigned int number_of_cells_alive, bool primary_hit, int thread_index)\n #endif\n {\n    if (render_data.buffers.emissive_triangles_count == 0)\n        // No initial candidates to sample since no lights\n        return;\n\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n#ifdef __KERNELCC__\n    uint32_t thread_index = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t thread_count = gridDim.x * blockDim.x;\n#endif\n\n    while (thread_index < regir_settings.get_number_of_reservoirs_per_cell(primary_hit) * number_of_cells_alive)\n    {\n        int reservoir_index = thread_index;\n        \n        int reservoir_index_in_cell = reservoir_index % regir_settings.get_grid_fill_settings(primary_hit).get_total_reservoir_count_per_cell();\n        int cell_alive_index = reservoir_index / regir_settings.get_number_of_reservoirs_per_cell(primary_hit);\n        int hash_grid_cell_index = cell_alive_index;\n        if (number_of_cells_alive == regir_settings.get_total_number_of_cells_per_grid(primary_hit))\n            // If all cells are alive, the cell index is straightforward\n            hash_grid_cell_index = cell_alive_index;\n        else\n            // Not all cells are alive, what we have is cell_alive_index which is the index of the cell in the alive list\n            // so we can fetch the index of the cell in the grid cells alive list with that cell_alive_index\n            hash_grid_cell_index = 
regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_list[cell_alive_index];\n        int reservoir_index_in_grid = hash_grid_cell_index * regir_settings.get_number_of_reservoirs_per_cell(primary_hit) + reservoir_index_in_cell;\n        \n        unsigned int seed;\n        if (render_data.render_settings.freeze_random)\n            seed = wang_hash(reservoir_index_in_grid + 1);\n        else\n            seed = wang_hash((reservoir_index_in_grid + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n\n        Xorshift32Generator random_number_generator(seed);\n\n        float3 center_cell_point = ReGIR_get_cell_world_point(render_data, hash_grid_cell_index, primary_hit);\n        float3 center_cell_normal = ReGIR_get_cell_world_normal(render_data, hash_grid_cell_index, primary_hit);\n        float center_cell_roughness = ReGIR_get_cell_roughness(render_data, hash_grid_cell_index, primary_hit);\n\n        if (regir_settings.get_hash_cell_data_soa(primary_hit).grid_cell_alive[hash_grid_cell_index] == 0)\n        {\n            // Grid cell wasn't used during shading in the last frame, let's not refill it\n            \n            // Storing an empty reservoir to clear the cell\n            regir_settings.store_reservoir_custom_buffer_opt(output_reservoirs_grid,  ReGIRReservoir(), hash_grid_cell_index, reservoir_index_in_cell);\n            \n            return;\n        }\n        \n        unsigned int spatial_neighbor_rng_seed;\n        if (regir_settings.spatial_reuse.do_coalesced_spatial_reuse)\n            // Everyone is going to use the same RNG (the RNG doesn't depend on the pixel index) \n            // such that memory accesses on the spatial neighbors are coalesced to improve performance\n            spatial_neighbor_rng_seed = render_data.render_settings.freeze_random ? 
render_data.random_number : (render_data.render_settings.sample_number + 1) * render_data.random_number;\n        else\n            spatial_neighbor_rng_seed = wang_hash(seed);\n\n        Xorshift32Generator spatial_neighbor_rng(spatial_neighbor_rng_seed);\n        ReGIRReservoir output_reservoir = spatial_reuse(render_data, input_reservoirs_grid, reservoir_index_in_cell, hash_grid_cell_index, primary_hit, center_cell_point, center_cell_normal, center_cell_roughness, spatial_neighbor_rng, random_number_generator);\n\n        spatial_neighbor_rng.m_state.seed = spatial_neighbor_rng_seed;\n\n        int valid_neighbor_count = spatial_reuse_mis_weight(render_data, output_reservoir, \n                reservoir_index_in_cell, hash_grid_cell_index, \n                primary_hit, center_cell_point, center_cell_normal, center_cell_roughness,\n                spatial_neighbor_rng, random_number_generator);\n\n        // Normalizing the reservoirs to 1\n        output_reservoir.finalize_resampling(1.0f, valid_neighbor_count);\n\n        regir_settings.store_reservoir_custom_buffer_opt(output_reservoirs_grid, output_reservoir, hash_grid_cell_index, reservoir_index_in_cell);\n\n#ifdef __KERNELCC__\n        spatial_reuse_pre_integration_accumulation<ReGIR_GridFillSpatialReuse_AccumulatePreIntegration>(render_data, output_reservoir, regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell), hash_grid_cell_index, primary_hit);\n#else\n        spatial_reuse_pre_integration_accumulation<accumulatePreIntegration>(render_data, output_reservoir, regir_settings.get_grid_fill_settings(primary_hit).reservoir_index_in_cell_is_canonical(reservoir_index_in_cell), hash_grid_cell_index, primary_hit);\n#endif\n\n#ifndef __KERNELCC__\n        // We're dispatching exactly one thread per reservoir to compute on the CPU so no need\n        // for the work queue style of things that is only needed on the GPU, we can just exit here\n        
break;\n#else\n        // We need to compute the next reservoir index for the next iteration\n        thread_index += thread_count;\n#endif\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/ReSTIR/ReGIR/SupersamplingCopy.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_KERNELS_REGIR_SUPERSAMPLING_COPY_H\n#define DEVICE_KERNELS_REGIR_SUPERSAMPLING_COPY_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n/**\n * This kernel copies the reservoirs of the alive cells of the input ReGIR grid into the\n * current slot ('correl_reduction_current_grid') of the supersampling\n * correlation-reduction grid\n */\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) ReGIR_Supersampling_Copy(HIPRTRenderData render_data, ReGIRHashGridSoADevice input_reservoirs_to_copy)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline ReGIR_Supersampling_Copy(HIPRTRenderData render_data, ReGIRHashGridSoADevice input_reservoirs_to_copy, int thread_index)\n#endif\n{\n    ReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n#ifdef __KERNELCC__\n    const uint32_t thread_index = blockIdx.x * blockDim.x + threadIdx.x;\n#endif\n\n#ifdef __KERNELCC__\n    if (thread_index >= *render_data.render_settings.regir_settings.get_hash_cell_data_soa(true).grid_cells_alive_count * regir_settings.get_number_of_reservoirs_per_cell(true))\n#else\n    if (thread_index >= render_data.render_settings.regir_settings.get_hash_cell_data_soa(true).grid_cells_alive_count->load() * regir_settings.get_number_of_reservoirs_per_cell(true))\n#endif\n    {\n        return;\n    }\n\n    unsigned int reservoir_index = thread_index;\n    unsigned int reservoir_index_in_cell = reservoir_index % regir_settings.get_number_of_reservoirs_per_cell(true);\n    unsigned int cell_alive_index = reservoir_index / regir_settings.get_number_of_reservoirs_per_cell(true);\n    // If all cells are alive, the cell index is straightforward\n    //\n    // Not all cells are alive, what we have is cell_alive_index which is the index of the cell in the alive list\n    // so we can fetch the 
index of the cell in the grid cells alive list with that cell_alive_index\n    unsigned int hash_grid_cell_index = regir_settings.get_hash_cell_data_soa(true).grid_cells_alive_list[cell_alive_index];\n    unsigned int reservoir_index_in_grid = hash_grid_cell_index * regir_settings.get_number_of_reservoirs_per_cell(true) + reservoir_index_in_cell;\n\n    ReGIRReservoir reservoir_to_copy = regir_settings.hash_grid.read_full_reservoir(input_reservoirs_to_copy, reservoir_index_in_grid);\n\n\tunsigned int reservoir_index_in_supersampling_grid = reservoir_index_in_grid + regir_settings.supersampling.correl_reduction_current_grid * regir_settings.get_number_of_reservoirs_per_grid(true);\n\n    render_data.render_settings.regir_settings.hash_grid.store_full_reservoir(regir_settings.supersampling.correlation_reduction_grid, reservoir_to_copy, reservoir_index_in_supersampling_grid);\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/TraceTest.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_TRACE_TEST_H\n#define KERNELS_TRACE_TEST_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/Hash.h\"\n#include \"Device/includes/Intersect.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) TraceTest(HIPRTRenderData render_data, int2 res)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline TraceTest(HIPRTRenderData render_data, int2 res, int x, int y)\n#endif\n{\n#ifdef __KERNELCC__\n    const uint32_t x = blockIdx.x * blockDim.x + threadIdx.x;\n    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;\n#endif\n    if (x >= res.x || y >= res.y)\n        return;\n\n    uint32_t pixel_index = x + y * res.x;\n\n    unsigned int seed;\n    if (render_data.render_settings.freeze_random)\n        seed = wang_hash(pixel_index + 1);\n    else\n        seed = wang_hash((pixel_index + 1) * (render_data.render_settings.sample_number + 1) * render_data.random_number);\n    Xorshift32Generator random_number_generator(seed);\n\n    // Direction to the center of the pixel\n    float x_ray_point_direction = (x + 0.5f);\n    float y_ray_point_direction = (y + 0.5f);\n    if (render_data.current_camera.do_jittering)\n    {\n        // Jitter randomly around the center\n        x_ray_point_direction += random_number_generator() - 0.5f;\n        y_ray_point_direction += random_number_generator() - 0.5f;\n    }\n\n    hiprtRay ray = render_data.current_camera.get_camera_ray(x_ray_point_direction, y_ray_point_direction, res);\n    hiprtHit hit;\n\n#ifdef __KERNELCC__\n#if UseSharedStackBVHTraversal == KERNEL_OPTION_TRUE\n#if SharedStackBVHTraversalSize > 0\n    hiprtSharedStackBuffer shared_stack_buffer{ SharedStackBVHTraversalSize, shared_stack_cache };\n#else\n    hiprtSharedStackBuffer shared_stack_buffer{ 0, nullptr };\n#endif\n    
hiprtGlobalStack global_stack(render_data.global_traversal_stack_buffer, shared_stack_buffer);\n\n    hiprtGeomTraversalClosestCustomStack<hiprtGlobalStack> traversal(render_data.GPU_BVH, ray, global_stack, hiprtTraversalHintDefault);\n#else\n    hiprtGeomTraversalClosest traversal(render_data.GPU_BVH, ray, hiprtTraversalHintDefault);\n#endif\n\n    hit = traversal.getNextHit();\n#endif\n\n    render_data.g_buffer.first_hit_prim_index[pixel_index] = hit.hasHit() ? 1 : 0;\n}\n\n#endif\n"
  },
  {
    "path": "src/Device/kernels/Utils/RayVolumeStateSize.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef KERNELS_RAY_VOLUME_STATE_SIZE_H\n#define KERNELS_RAY_VOLUME_STATE_SIZE_H\n\n#include \"Device/includes/FixIntellisense.h\"\n#include \"Device/includes/RayVolumeState.h\"\n#include \"HostDeviceCommon/Packing.h\"\n#include \"HostDeviceCommon/RenderSettings.h\"\n\n#ifdef __KERNELCC__\nGLOBAL_KERNEL_SIGNATURE(void) RayVolumeStateSize(size_t* out_buffer)\n#else\nGLOBAL_KERNEL_SIGNATURE(void) inline RayVolumeStateSize(size_t* out_buffer)\n#endif\n{\n\tout_buffer[0] = sizeof(RayVolumeState);\n}\n\n#endif\n"
  },
  {
    "path": "src/Experimentations/TestCopyKernelAlignment.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernel.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n\n#include <memory>\n\nvoid TestCopyKernelAlignment()\n{\n    std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx = std::make_shared<HIPRTOrochiCtx>(0);\n\n    GPUKernel test_copy_kernel;\n    test_copy_kernel.set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/Experimentations/TestCopyKernelAlignment.h\");\n    test_copy_kernel.set_kernel_function_name(\"TestCopyKernelAlignment\");\n    test_copy_kernel.compile(hiprt_orochi_ctx);\n\n#define BUFFER_SIZE 500000000\n#define ITERATIONS 1000\n\n    OrochiBuffer<ColorRGB32F> buffer_a(BUFFER_SIZE);\n    OrochiBuffer<ColorRGB32F> buffer_b(BUFFER_SIZE);\n\n    size_t buffer_size = BUFFER_SIZE;\n\n    ColorRGB32F* buffer_a_ptr = buffer_a.get_device_pointer();\n    ColorRGB32F* buffer_b_ptr = buffer_b.get_device_pointer();\n    void* launch_args[] = { &buffer_a_ptr, &buffer_b_ptr, &buffer_size };\n\n    float average_sum = 0.0f;\n    float min_exec_time = 1000000.0f;\n    float max_exec_time = 0.0f;\n\n    for (int i = 0; i < ITERATIONS; i++)\n    {\n        float execution_time;\n        test_copy_kernel.launch_synchronous(256, 1, BUFFER_SIZE, 1, launch_args, &execution_time);\n\n        min_exec_time = hippt::min(execution_time, min_exec_time);\n        max_exec_time = hippt::max(execution_time, max_exec_time);\n        average_sum += execution_time;\n    }\n\n    std::cout << \"Min/max/average exec time:\" << min_exec_time << \" / \" << max_exec_time << \"/\" << average_sum / ITERATIONS << \" ms\" << std::endl;\n}"
  },
  {
    "path": "src/Experimentations/TestCopyKernelAlignment.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef EXPERIMENTATIONS_TEST_COPY_KERNEL_ALIGNMENT_H\n#define EXPERIMENTATIONS_TEST_COPY_KERNEL_ALIGNMENT_H\n\nvoid TestCopyKernelAlignment();\n\n#endif\n"
  },
  {
    "path": "src/Experimentations/TestCopyKernelRestrict.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernel.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n\n#include <memory>\n\nvoid TestCopyKernelRestrict()\n{\n    std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx = std::make_shared<HIPRTOrochiCtx>(0);\n\n    GPUKernel test_copy_kernel;\n    test_copy_kernel.set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/Experimentations/TestCopyKernelRestrict.h\");\n    test_copy_kernel.set_kernel_function_name(\"TestCopyKernelRestrict\");\n    test_copy_kernel.compile(hiprt_orochi_ctx);\n\n#define BUFFER_SIZE 1000000000\n#define ITERATIONS 100\n    OrochiBuffer<float> buffer_a(BUFFER_SIZE);\n    OrochiBuffer<float> buffer_b(BUFFER_SIZE);\n    OrochiBuffer<float> buffer_c(BUFFER_SIZE);\n    OrochiBuffer<float> buffer_d(BUFFER_SIZE);\n\n    buffer_a.memset_whole_buffer(1);\n    buffer_b.memset_whole_buffer(1);\n    buffer_c.memset_whole_buffer(1);\n    buffer_d.memset_whole_buffer(1);\n\n    size_t buffer_size = BUFFER_SIZE;\n\n\tfloat* buffer_a_ptr = buffer_a.get_device_pointer();\n\tfloat* buffer_b_ptr = buffer_b.get_device_pointer();\n\tfloat* buffer_c_ptr = buffer_c.get_device_pointer();\n\tfloat* buffer_d_ptr = buffer_d.get_device_pointer();\n\n    void* launch_args[] = { &buffer_a_ptr, &buffer_b_ptr, &buffer_c_ptr, &buffer_d_ptr, &buffer_size };\n\n    float min_exec_time = 1000000.0f;\n    float max_exec_time = 0.0f;\n\n    for (int i = 0; i < ITERATIONS; i++)\n    {\n        float execution_time;\n        test_copy_kernel.launch_synchronous(256, 1, BUFFER_SIZE, 1, launch_args, &execution_time);\n\n        min_exec_time = hippt::min(execution_time, min_exec_time);\n        max_exec_time = hippt::max(execution_time, max_exec_time);\n    }\n\n    std::cout << \"Min/max exec time:\" << min_exec_time << \" / \" << max_exec_time << \" ms\" << std::endl;\n}"
  },
  {
    "path": "src/Experimentations/TestCopyKernelRestrict.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef EXPERIMENTATIONS_TEST_COPY_KERNEL_RESTRICT_H\n#define EXPERIMENTATIONS_TEST_COPY_KERNEL_RESTRICT_H\n\nvoid TestCopyKernelRestrict();\n\n#endif\n"
  },
  {
    "path": "src/Experimentations/TestCopyKernelSimple.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernel.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/kernels/Experimentations/TestCopyKernelSimple.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n\n#include <memory>\n\nvoid TestCopyKernelSimple()\n{\n    std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx = std::make_shared<HIPRTOrochiCtx>(0);\n\n    GPUKernel test_copy_kernel;\n    test_copy_kernel.set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/Experimentations/TestCopyKernelSimple.h\");\n    test_copy_kernel.set_kernel_function_name(\"TestCopyKernelSimple\");\n    test_copy_kernel.compile(hiprt_orochi_ctx);\n\n#define BUFFER_SIZE 2560*1440\n#define ITERATIONS 4000\n\n    OrochiBuffer<TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE> buffer_a(BUFFER_SIZE);\n    OrochiBuffer<TEST_COPY_KERNEL_SIMPLE_BUFFER_TYPE> buffer_b(BUFFER_SIZE);\n\n    TestCopyKernelSimpleInputData input_data;\n    input_data.buffer_a = buffer_a.get_device_pointer();\n    input_data.buffer_b = buffer_b.get_device_pointer();\n\n    size_t buffer_size = BUFFER_SIZE;\n    void* launch_args[] = { &input_data, &buffer_size};\n\n    float average_sum = 0.0f;\n    float min_exec_time = 1000000.0f;\n    float max_exec_time = 0.0f;\n\n    for (int i = 0; i < ITERATIONS; i++)\n    {\n        float execution_time;\n        test_copy_kernel.launch_synchronous(256, 1, BUFFER_SIZE, 1, launch_args, &execution_time);\n\n        min_exec_time = hippt::min(execution_time, min_exec_time);\n        max_exec_time = hippt::max(execution_time, max_exec_time);\n        average_sum += execution_time;\n    }\n\n    std::cout << \"Min/max/average exec time:\" << min_exec_time << \" / \" << max_exec_time << \" / \" << average_sum / ITERATIONS << \" ms\" << std::endl;\n}"
  },
  {
    "path": "src/Experimentations/TestCopyKernelSimple.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef EXPERIMENTATIONS_TEST_COPY_KERNEL_SIMPLE_H\n#define EXPERIMENTATIONS_TEST_COPY_KERNEL_SIMPLE_H\n\nvoid TestCopyKernelSimple();\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/HIPRTOrochiCtx.h",
    "content": "/* \n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt \n */\n\n#ifndef HIPRT_OROCHI_CTX_H\n#define HIPRT_OROCHI_CTX_H\n\n#include <memory>\n\n#include <hiprt/hiprt.h>\n#include <hiprt/impl/Context.h>\n#include <Orochi/Orochi.h>\n\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nstruct HIPRTOrochiCtx\n{\n\tHIPRTOrochiCtx() {}\n\n\tHIPRTOrochiCtx(int device_index)\n\t{\n\t\tinit(device_index);\n\t}\n\n#ifdef _WIN32\n\tUtils::AddEnvVarError add_CUDA_PATH_to_PATH()\n\t{\n\t\t// On Windows + NVIDIA, adding the CUDA_PATH to the PATH environment variable just to be sure\n\t\t// that CUDA's DLLs are found in case the user indeed has installer the CUDA toolkit but their PATH\n\t\t// environment variable is not set correctly.\n\t\treturn Utils::windows_add_ENV_var_to_PATH(L\"CUDA_PATH\", L\"\\\\bin;\");\n\t}\n#endif\n\n\tvoid init(int device_index)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Initializing Orochi...\");\n\n#ifdef OROCHI_ENABLE_CUEW\n#ifdef _WIN32\n\t\tUtils::AddEnvVarError error = add_CUDA_PATH_to_PATH();\n#endif\n#endif\n\n#ifdef OROCHI_ENABLE_CUEW\n\t\tint error_initialize = oroInitialize((oroApi)(ORO_API_CUDA), 0);\n#else\n\t\tint error_initialize = oroInitialize((oroApi)(ORO_API_HIP), 0);\n#endif\n\t\tif (error_initialize != oroSuccess)\n\t\t{\n\t\t\tswitch (error_initialize)\n\t\t\t{\n\t\t\t\t// Unable to load HIP/CUDA\n\t\t\t\tcase ORO_API_HIPDRIVER:\n\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \n\t\t\t\t\t\t\"Unable to load HIP... Are your drivers up-to-date?\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase ORO_API_CUDADRIVER:\n\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \n\t\t\t\t\t\t\"Unable to load CUDA... 
Are your drivers up-to-date?\");\n\t\t\t\t\tbreak;\n\n\t\t\t\t// Unable to load HIP/CUDA\n\t\t\t\tcase ORO_API_HIPRTC:\n\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR,\n\t\t\t\t\t\t\"Unable to load HIPRTC... Is the HIP SDK (Windows) or ROCm + HIP (Linux) installed?\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase ORO_API_CUDARTC:\n\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \n\t\t\t\t\t\t\"Unable to load CUDARTC... Is the CUDA Toolkit installed + is the CUDA_PATH \"\n\t\t\t\t\t\t\"environment variable set? (or have {CUDA_TOOLKIT_FOLDER/bin} in your \"\n\t\t\t\t\t\t\"PATH environment variable)\");\n\t\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tint trash = std::getchar();\n\t\t\tstd::exit(1);\n\t\t}\n\n\t\tOROCHI_CHECK_ERROR(oroInit(0));\n\t\tOROCHI_CHECK_ERROR(oroDeviceGet(&orochi_device, device_index));\n\t\tOROCHI_CHECK_ERROR(oroCtxCreate(&orochi_ctx, 0, orochi_device));\n\n\t\tOROCHI_CHECK_ERROR(oroGetDeviceProperties(&device_properties, orochi_device));\n\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"HIPRT ver.%s\", HIPRT_VERSION_STR);\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Executing on '%s'\\n\", device_properties.name);\n\t\tif (std::string(device_properties.name).find(\"NVIDIA\") != std::string::npos)\n\t\t\thiprt_ctx_input.deviceType = hiprtDeviceNVIDIA;\n\t\telse\n\t\t\thiprt_ctx_input.deviceType = hiprtDeviceAMD;\n\n\t\thiprt_ctx_input.ctxt = oroGetRawCtx(orochi_ctx);\n\t\thiprt_ctx_input.device = oroGetRawDevice(orochi_device);\n\t\thiprtSetLogLevel(hiprtLogLevelError);\n\n\t\tHIPRT_CHECK_ERROR(hiprtCreateContext(HIPRT_API_VERSION, hiprt_ctx_input, hiprt_ctx));\n\t}\n\n\thiprtContextCreationInput hiprt_ctx_input = { nullptr, -1, hiprtDeviceAMD };\n\n\toroCtx orochi_ctx = nullptr;\n\toroDevice orochi_device = -1;\n\toroDeviceProp device_properties = {};\n\n\thiprtContext hiprt_ctx = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/HIPRTOrochiUtils.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\n#include <deque>\n#include <unordered_set>\n\nextern ImGuiLogger g_imgui_logger;\n\nvoid orochi_check_error(oroError res, const char* file, uint32_t line)\n{\n\tif (res != oroSuccess)\n\t{\n\t\tconst char* msg;\n\t\toroGetErrorString(res, &msg);\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Orochi error: '%s' on line %d in '%s'.\", msg, line, file);\n\n\t\tUtils::debugbreak();\n\t\texit(EXIT_FAILURE);\n\t}\n}\n\nvoid orochi_rtc_check_error(orortcResult res, const char* file, uint32_t line)\n{\n\tif (res != ORORTC_SUCCESS)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"ORORTC error: '%s' [ %d ] on line %d in '%s'\", orortcGetErrorString(res), res, line, file);\n\n\t\tUtils::debugbreak();\n\t\texit(EXIT_FAILURE);\n\t}\n}\n\nvoid hiprt_check_error(hiprtError res, const char* file, uint32_t line)\n{\n\tif (res != hiprtSuccess)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"HIPRT error: '%d' on line %d in '%s'.\", res, line, file);\n\n\t\tUtils::debugbreak();\n\t\texit(EXIT_FAILURE);\n\t}\n}\n\nnamespace HIPPTOrochiUtils\n{\n\tbool read_source_code(const std::string& path, std::string& sourceCode, std::vector<std::string>* includes)\n\t{\n\t\tstd::fstream f(path);\n\t\tif (f.is_open())\n\t\t{\n\t\t\tsize_t sizeFile;\n\t\t\tf.seekg(0, std::fstream::end);\n\t\t\tsize_t size = sizeFile = (size_t)f.tellg();\n\t\t\tf.seekg(0, std::fstream::beg);\n\t\t\tif (includes)\n\t\t\t{\n\t\t\t\tsourceCode.clear();\n\t\t\t\tstd::string line;\n\t\t\t\tchar buf[512];\n\t\t\t\twhile (std::getline(f, line))\n\t\t\t\t{\n\t\t\t\t\tif (strstr(line.c_str(), \"#include\") != 0)\n\t\t\t\t\t{\n\t\t\t\t\t\tconst char* a = strstr(line.c_str(), 
\"<\");\n\t\t\t\t\t\tconst char* b = strstr(line.c_str(), \">\");\n\t\t\t\t\t\tif (!a)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// If we couldn't find a \"<\", trying to find a '\"'\n\t\t\t\t\t\t\ta = strstr(line.c_str(), \"\\\"\");\n\t\t\t\t\t\t\tif (!a)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// Not even '\"' was find, that's invalid #include syntax\n\t\t\t\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Unable to parse header name in line '%s'\", line.c_str());\n\n\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t// Same thing with the ending character, '>' or another '\"'\n\t\t\t\t\t\tif (!b)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tb = strstr(a + 1, \"\\\"\");\n\n\t\t\t\t\t\t\tif (!b)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Unable to parse header name in line '%s'\", line.c_str());\n\n\t\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tint n = b - a - 1;\n\t\t\t\t\t\tmemcpy(buf, a + 1, n);\n\t\t\t\t\t\tbuf[n] = '\\0';\n\t\t\t\t\t\tincludes->push_back(buf);\n\t\t\t\t\t}\n\n\t\t\t\t\tsourceCode += line + '\\n';\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tsourceCode.resize(size, ' ');\n\t\t\t\tf.read(&sourceCode[0], size);\n\t\t\t}\n\t\t\tf.close();\n\t\t\treturn true;\n\t\t}\n\t\treturn false;\n\t}\n\n\thiprtError build_trace_kernel(hiprtContext ctxt,\n\t\tconst std::string& kernel_file_path,\n\t\tconst std::string& function_name,\n\t\thiprtApiFunction& kernel_function_out,\n\t\tconst std::vector<std::string>& additional_include_directories,\n\t\tconst std::vector<std::string>& compiler_options,\n\t\tunsigned int num_geom_types, unsigned int num_ray_types, \n\t\tbool use_compiler_cache,\n\t\thiprtFuncNameSet* func_name_set,\n\t\tconst std::string& additional_cache_key)\n\t{\n\t\tstd::string kernel_source_code;\n\t\tread_source_code(kernel_file_path, kernel_source_code);\n\n\t\tstd::vector<const char*> compiler_options_cstr;\n\t\t\n\t\tfor (const 
std::string& option : compiler_options)\n\t\t\tcompiler_options_cstr.push_back(option.c_str());\n\n\t\tstd::vector<std::string> additional_include_directories_options;\n\t\tadditional_include_directories_options.reserve(additional_include_directories.size());\n\t\tfor (const std::string& additional_include_dir : additional_include_directories)\n\t\t{\n\t\t\tadditional_include_directories_options.push_back(\"-I\" + additional_include_dir);\n\t\t\tcompiler_options_cstr.push_back(additional_include_directories_options.back().c_str());\n\t\t}\n\n\t\tconst char* func_name_cstr = function_name.c_str();\n\t\treturn hiprtBuildTraceKernels(\n\t\t\tctxt,\n\t\t\t1,\n\t\t\t&func_name_cstr,\n\t\t\tkernel_source_code.c_str(),\n\t\t\tkernel_file_path.c_str(),\n\t\t\t0,\n\t\t\tnullptr,\n\t\t\tnullptr,\n\t\t\tcompiler_options_cstr.size(),\n\t\t\tcompiler_options_cstr.size() > 0 ? compiler_options_cstr.data() : nullptr,\n\t\t\tnum_geom_types,\n\t\t\tnum_ray_types,\n\t\t\tfunc_name_set,\n\t\t\t&kernel_function_out,\n\t\t\tnullptr,\n\t\t\tuse_compiler_cache, \n\t\t\tadditional_cache_key);\n\t}\n}\n"
  },
  {
    "path": "src/HIPRT-Orochi/HIPRTOrochiUtils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HIPRTPT_OROCHI_UTILS_H\n#define HIPRTPT_OROCHI_UTILS_H\n\n#include <hiprt/hiprt.h>\n#include <Orochi/Orochi.h>\n\n#include <filesystem>\n#include <optional>\n\n#define OROCHI_CHECK_ERROR( error ) ( orochi_check_error( error, __FILE__, __LINE__ ) )\n#define OROCHI_RTC_CHECK_ERROR( error ) ( orochi_rtc_check_error( error, __FILE__, __LINE__ ) )\n#define HIPRT_CHECK_ERROR( error ) ( hiprt_check_error( error, __FILE__, __LINE__ ) )\n\n// This flag isn't defined in Orochi for some reasons ?\n// It allows sampling textures with normalized coordinates in [0, 1[ instead of \n// [0, width[\n#define ORO_TRSF_NORMALIZED_COORDINATES 0x02\n\nnamespace HIPPTOrochiUtils\n{\n\t/*\n\t * Reads a given file, outputs its code in 'sourceCode' and a list of the names\n\t * of the files included in the source file by #include directives in 'includes'\n\t * \n\t * If 'includes' is nullptr, then no include names will be returned\n\t */\n\tbool read_source_code(const std::string& path, std::string& sourceCode, std::vector<std::string>* includes = nullptr);\n\n\t/**\n\t * Note, the 'additional_include_directories' are expected to be given are relative folder\n\t * path \"../../myIncludeDir\" without any \"-I\" prefix\n\t */\n\thiprtError build_trace_kernel(hiprtContext ctxt,\n\t\tconst std::string& kernel_file_path,\n\t\tconst std::string& function_name,\n\t\thiprtApiFunction& kernel_function_out,\n\t\tconst std::vector<std::string>& additional_include_directories,\n\t\tconst std::vector<std::string>& compiler_options,\n\t\tunsigned int num_geom_types, unsigned int num_ray_types,\n\t\tbool use_compiler_cache,\n\t\thiprtFuncNameSet* func_name_set = nullptr,\n\t\tconst std::string& additional_cache_key = \"\");\n}\n\nvoid orochi_check_error(oroError res, const char* file, uint32_t line);\nvoid orochi_rtc_check_error(orortcResult res, const 
char* file, uint32_t line);\nvoid hiprt_check_error(hiprtError res, const char* file, uint32_t line);\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/HIPRTScene.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HIPRT_SCENE_H\n#define HIPRT_SCENE_H\n\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"HIPRT-Orochi/OrochiTexture.h\"\n#include \"Renderer/GPUDataStructures/MaterialPackedSoAGPUData.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/PrecomputedEmissiveTrianglesDataSoAHost.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n\n#include \"hiprt/hiprt.h\"\n#include \"Orochi/Orochi.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nstruct HIPRTGeometry\n{\n\tHIPRTGeometry() : m_hiprt_ctx(nullptr) {}\n\tHIPRTGeometry(hiprtContext ctx) : m_hiprt_ctx(ctx) {}\n\n\t~HIPRTGeometry()\n\t{\n\t\tif (m_mesh.triangleIndices)\n\t\t\tOROCHI_CHECK_ERROR(oroFree(reinterpret_cast<oroDeviceptr>(m_mesh.triangleIndices)));\n\n\t\tif (m_mesh.vertices && m_allow_free_mesh_vertices)\n\t\t\tOROCHI_CHECK_ERROR(oroFree(reinterpret_cast<oroDeviceptr>(m_mesh.vertices)));\n\n\t\tif (m_geometry)\n\t\t\tHIPRT_CHECK_ERROR(hiprtDestroyGeometry(m_hiprt_ctx, m_geometry));\n\t}\n\n\tvoid upload_triangle_indices(const std::vector<int>& triangles_indices)\n\t{\n\t\tint triangle_count = triangles_indices.size() / 3;\n\t\t// Allocating and initializing the indices buffer\n\t\tm_mesh.triangleCount = triangle_count;\n\t\tm_mesh.triangleStride = sizeof(int3);\n\t\tOROCHI_CHECK_ERROR(oroMalloc(reinterpret_cast<oroDeviceptr*>(&m_mesh.triangleIndices), triangle_count * sizeof(int3)));\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(m_mesh.triangleIndices), triangles_indices.data(), triangle_count * sizeof(int3), oroMemcpyHostToDevice));\n\t}\n\n\tstd::vector<int> download_triangle_indices()\n\t{\n\t\tif (m_mesh.vertices != nullptr)\n\t\t\treturn OrochiBuffer<int>::download_data(reinterpret_cast<int*>(m_mesh.triangleIndices), m_mesh.triangleCount * 3);\n\n\t\treturn std::vector<int>();\n\t}\n\n\tvoid upload_vertices_positions(const std::vector<float3>& 
vertices_positions)\n\t{\n\t\t// Allocating and initializing the vertices positions buiffer\n\t\tm_mesh.vertexCount = vertices_positions.size();\n\t\tm_mesh.vertexStride = sizeof(float3);\n\t\tOROCHI_CHECK_ERROR(oroMalloc(reinterpret_cast<oroDeviceptr*>(&m_mesh.vertices), m_mesh.vertexCount * sizeof(float3)));\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(m_mesh.vertices), vertices_positions.data(), m_mesh.vertexCount * sizeof(float3), oroMemcpyHostToDevice));\n\t}\n\n\tvoid copy_vertices_positions_from(const HIPRTGeometry& other_geometry)\n\t{\n\t\tm_mesh.vertexCount = other_geometry.m_mesh.vertexCount;\n\t\tm_mesh.vertexStride = other_geometry.m_mesh.vertexStride;\n\t\tm_mesh.vertices = other_geometry.m_mesh.vertices;\n\t\t// This structure is not going to free the mesh vertices because it is\n\t\t// managed by another HIPRTGeometry\n\t\tm_allow_free_mesh_vertices = false;\n\t}\n\n\tstd::vector<float3> download_vertices_positions()\n\t{\n\t\tif (m_mesh.vertices != nullptr)\n\t\t\treturn OrochiBuffer<float3>::download_data(reinterpret_cast<float3*>(m_mesh.vertices), m_mesh.vertexCount);\n\n\t\treturn std::vector<float3>();\n\t}\n\n\tvoid log_bvh_building(hiprtBuildFlags build_flags)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Compiling BVH building kernels & building scene BVH...\");\n\t}\n\n\tvoid build_bvh(hiprtBuildFlags build_flags, bool do_compaction, bool disable_spatial_splits_on_OOM, oroStream_t build_stream)\n\t{\n\t\tauto start = std::chrono::high_resolution_clock::now();\n\n\t\tif (m_geometry != nullptr)\n\t\t{\n\t\t\tHIPRT_CHECK_ERROR(hiprtDestroyGeometry(m_hiprt_ctx, m_geometry));\n\n\t\t\tm_geometry = nullptr;\n\t\t}\n\n\t\tif (m_mesh.vertexCount == 0 || m_mesh.triangleCount == 0)\n\t\t\t// No BVH to build\n\t\t\treturn;\n\n\t\thiprtBuildOptions build_options;\n\t\thiprtGeometryBuildInput geometry_build_input;\n\t\tsize_t geometry_temp_size;\n\t\thiprtDevicePtr 
geometry_temp;\n\n\t\tbuild_options.buildFlags = build_flags;\n\n\t\tgeometry_build_input.type = hiprtPrimitiveTypeTriangleMesh;\n\t\tgeometry_build_input.primitive.triangleMesh = m_mesh;\n\t\t// Geom type 0 here \n\t\tgeometry_build_input.geomType = 0;\n\n\t\tlog_bvh_building(build_options.buildFlags);\n\t\t// Getting the buffer sizes for the construction of the BVH\n\t\tHIPRT_CHECK_ERROR(hiprtGetGeometryBuildTemporaryBufferSize(m_hiprt_ctx, geometry_build_input, build_options, geometry_temp_size));\n\n\t\toroError_t error = oroMalloc(reinterpret_cast<oroDeviceptr*>(&geometry_temp), geometry_temp_size);\n\t\tif (error != oroSuccess && error == oroErrorOutOfMemory && disable_spatial_splits_on_OOM)\n\t\t{\n\t\t\tif (error == oroErrorOutOfMemory && disable_spatial_splits_on_OOM)\n\t\t\t{\n\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Out of memory while trying to build the BVH... Retrying without spatial splits. Tracing performance may suffer...\");\n\n\t\t\t\tbuild_options.buildFlags |= hiprtBuildFlagBitDisableSpatialSplits;\n\n\t\t\t\tHIPRT_CHECK_ERROR(hiprtGetGeometryBuildTemporaryBufferSize(m_hiprt_ctx, geometry_build_input, build_options, geometry_temp_size));\n\t\t\t\terror = oroMalloc(reinterpret_cast<oroDeviceptr*>(&geometry_temp), geometry_temp_size);\n\n\t\t\t\tif (error != oroSuccess)\n\t\t\t\t{\n\t\t\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Error while trying to build the BVH even without spatial splits... 
Aborting...\");\n\n\t\t\t\t\tOROCHI_CHECK_ERROR(error);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\telse\n\t\t\tOROCHI_CHECK_ERROR(error);\n\n\t\tHIPRT_CHECK_ERROR(hiprtCreateGeometry(m_hiprt_ctx, geometry_build_input, build_options, m_geometry));\n\t\tHIPRT_CHECK_ERROR(hiprtBuildGeometry(m_hiprt_ctx, hiprtBuildOperationBuild, geometry_build_input, build_options, geometry_temp, build_stream, m_geometry));\n\t\tOROCHI_CHECK_ERROR(oroFree(reinterpret_cast<oroDeviceptr>(geometry_temp)));\n\n\t\tif (do_compaction)\n\t\t\tHIPRT_CHECK_ERROR(hiprtCompactGeometry(m_hiprt_ctx, 0, m_geometry, m_geometry));\n\n\t\tauto stop = std::chrono::high_resolution_clock::now();\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"BVH built in %ldms\", std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count());\n\t}\n\n\thiprtContext m_hiprt_ctx = nullptr;\n\thiprtTriangleMeshPrimitive m_mesh = { nullptr };\n\t// One geometry for the whole scene for now\n\thiprtGeometry m_geometry = nullptr;\n\n\tbool m_allow_free_mesh_vertices = true;\n};\n\nstruct HIPRTScene\n{\n\tvoid print_statistics(std::ostream& stream)\n\t{\n\t\tstream << \"Scene statistics: \" << std::endl;\n\t\tstream << \"\\t\" << whole_scene_BLAS.m_mesh.vertexCount << \" vertices\" << std::endl;\n\t\tstream << \"\\t\" << whole_scene_BLAS.m_mesh.triangleCount << \" triangles\" << std::endl;\n\t\tstream << \"\\t\" << emissive_triangles_primitive_indices.size() << \" emissive triangles\" << std::endl;\n\t\tstream << \"\\t\" << materials_buffer.m_element_count << \" materials\" << std::endl;\n\t\tstream << \"\\t\" << orochi_materials_textures.size() << \" textures\" << std::endl;\n\t}\n\n\tHIPRTGeometry whole_scene_BLAS;\n\tHIPRTGeometry emissive_triangles_BLAS;\n\n\tOrochiBuffer<float> triangle_areas;\n\tOrochiBuffer<unsigned char> has_vertex_normals;\n\tOrochiBuffer<float3> vertex_normals;\n\tOrochiBuffer<int> material_indices;\n\tDevicePackedTexturedMaterialSoAGPUData materials_buffer;\n\n\t// This 
vector contains true for a material that has a fully opaque base color texture.\n\t// Otherwise, the texture has some alpha transparency in it\n\t//\n\t// This vector isn't used on the GPU, it's only used by the CPU to basically remember which \n\t// materials had textures with some alpha in it\n\tstd::vector<bool> material_has_opaque_base_color_texture;\n\tOrochiBuffer<unsigned char> material_opaque;\n\n\tint emissive_triangles_count = 0;\n\tOrochiBuffer<int> emissive_triangles_primitive_indices;\n\tOrochiBuffer<int> emissive_triangles_indices_and_emissive_textures;\n\tOrochiBuffer<float> emissive_power_alias_table_probas;\n\tOrochiBuffer<int> emissive_power_alias_table_alias;\n\t// This is a remnant of some tests and it was actually not worth it\n\tPrecomputedEmissiveTrianglesDataSoAHost<OrochiBuffer> precomputed_emissive_triangles_data;\n\n\t// Vector to keep the textures data alive otherwise the OrochiTexture objects would\n\t// be destroyed which means that the underlying textures would be destroyed\n\tstd::vector<OrochiTexture> orochi_materials_textures;\n\tOrochiBuffer<oroTextureObject_t> gpu_materials_textures;\n\tOrochiBuffer<float2> texcoords_buffer;\n};\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiBuffer.h",
    "content": "/* \n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OROCHI_BUFFER_H\n#define OROCHI_BUFFER_H\n\n#include \"hiprt/hiprt.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"Orochi/Orochi.h\"\n#include \"UI/DisplayView/DisplayTextureType.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n#include \"GL/glew.h\"\n#include \"tracy/TracyOpenGL.hpp\"\n\nextern ImGuiLogger g_imgui_logger;\n\ntemplate <typename T>\nclass OrochiBuffer\n{\npublic:\n\tusing value_type = T;\n\n\tOrochiBuffer() : m_data_pointer(nullptr) {}\n\tOrochiBuffer(int element_count);\n\tOrochiBuffer(OrochiBuffer<T>&& other);\n\t~OrochiBuffer();\n\n\tvoid operator=(OrochiBuffer<T>&& other) noexcept;\n\n\tvoid memset_whole_buffer(T value);\n\n\tvoid resize(int new_element_count, size_t type_size_override = 0);\n\tvoid resize_host_pinned_mem(int new_element_count, size_t type_size_override = 0);\n\tsize_t size() const;\n\tsize_t get_byte_size() const;\n\n\tconst T* get_device_pointer() const;\n\tT* get_device_pointer();\n\n\tconst T* get_host_pinned_pointer() const;\n\tT* get_host_pinned_pointer();\n\n\t/**\n\t * Returns a pointer to the device buffer but cast into an AtomicType\n\t */\n\tconst AtomicType<T>* get_atomic_device_pointer() const;\n\tAtomicType<T>* get_atomic_device_pointer();\n\n\t/**\n\t * data() is just an alias for get_device_pointer()\n\t */\n\tconst T* data() const;\n\tT* data();\n\n\tbool is_allocated() const;\n\n\t/** \n\t * Static function for downloading from a device buffer when we\n\t * only have the address of the buffer (and not the OrochiBuffer object)\n\t */\n\tstatic std::vector<T> download_data(T* device_data_pointer, size_t element_count);\n\tstd::vector<T> download_data() const;\n\t/**\n\t * Download the data of the *whole* buffer directly to the given 'host_pointer'\n\t * \n\t * The given 'host_pointer' is supposed to be pointing to an allocated 
memory block\n\t * that is large enough to accomodate all the data of this buffer. Behavior is undefined\n\t * if this is not the case\n\t * \n\t * 'host_pointer' can also be the pointer returned by 'get_host_pinned_pointer()' if using host pinned\n\t * memory\n\t */\n\tvoid download_data_into(T* host_pointer) const;\n\n\t/**\n\t * Downloads elements ['start_element_index', 'stop_element_index_excluded'[ from the buffer\n\t */\n\tstd::vector<T> download_data_partial(int start_element_index, int stop_element_index_excluded) const;\n\tvoid download_data_async(void* out, oroStream_t stream) const;\n\n\n\tstatic void upload_data(T* device_data_pointer, const std::vector<T>& data_to_upload, size_t element_count);\n\tstatic void upload_data(T* device_data_pointer, const T* data_to_upload, size_t element_count);\n\t/**\n\t * Uploads as many elements as returned by size from the data std::vector into the buffer.\n\t * The given std::vector must therefore contain at least size() elements.\n\t * \n\t * The overload using a void pointer reads sizeof(T) * size() bytes starting at\n\t * the given pointer address. The given pointer must therefore provide a contiguous access\n\t * to sizeof(T) * size() bytes of data\n\t */\n\tvoid upload_data(const std::vector<T>& data);\n\tvoid upload_data(const T* data);\n\t/**\n\t * Uploads 'element_count' elmements from 'data' starting (it will be overriden) at element number 'start_index' in the buffer\n\t */\n\tvoid upload_data_partial(int start_index, const T* data, size_t element_count);\n\n\tvoid unpack_to_GL_texture(GLuint texture, GLint texture_unit, int width, int height, DisplayTextureType texture_type);\n\n\t/**\n\t * Copies the data in 'other' to this buffer.\n\t * \n\t * This copies the maximum amount of data from 'other' that can fit in this buffer\n\t */\n\tvoid memcpy_from(const OrochiBuffer<T>& other);\n\tvoid memcpy_from(T* data_source, size_t element_count_to_copy);\n\n\t/**\n\t * Frees the buffer. 
No effect if already freed / not allocated yet\n\t */\n\tvoid free();\n\nprivate:\n\tbool m_pinned_memory = false;\n\n\tT* m_data_pointer = nullptr;\n\n\tsize_t m_element_count = 0;\n};\n\ntemplate <typename T>\nOrochiBuffer<T>::OrochiBuffer(int element_count) : m_element_count(element_count)\n{\n\tOROCHI_CHECK_ERROR(oroMalloc(reinterpret_cast<oroDeviceptr*>(&m_data_pointer), sizeof(T) * element_count));\n}\n\ntemplate <typename T>\nOrochiBuffer<T>::OrochiBuffer(OrochiBuffer<T>&& other)\n{\n\tm_data_pointer = other.m_data_pointer;\n\tm_element_count = other.m_element_count;\n\n\tother.m_data_pointer = nullptr;\n\tother.m_element_count = 0;\n}\n\ntemplate <typename T>\nOrochiBuffer<T>::~OrochiBuffer()\n{\n\tif (m_data_pointer)\n\t\tfree();\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::operator=(OrochiBuffer&& other) noexcept\n{\n\tif (m_data_pointer)\n\t\tfree();\n\n\tm_data_pointer = other.m_data_pointer;\n\tm_element_count = other.m_element_count;\n\n\tother.m_data_pointer = nullptr;\n\tother.m_element_count = 0;\n}\n\ntemplate<typename T>\ninline void OrochiBuffer<T>::memset_whole_buffer(T value)\n{\n\tif (m_data_pointer == nullptr)\n\t{\n \t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to memset on an OrochiBuffer that hasn't been allocated yet!\");\n\t\treturn;\n\t}\n\n\tstd::vector<T> data(m_element_count, value);\n\n\tupload_data(data);\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::resize(int new_element_count, size_t type_size_override)\n{\n\tif (m_data_pointer)\n\t\tfree();\n\n\tsize_t buffer_size = type_size_override != 0 ? 
(type_size_override * new_element_count) : (sizeof(T) * new_element_count);\n\tOROCHI_CHECK_ERROR(oroMalloc(reinterpret_cast<oroDeviceptr*>(&m_data_pointer), buffer_size));\n\n\tm_element_count = new_element_count;\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::resize_host_pinned_mem(int new_element_count, size_t type_size_override)\n{\n\tif (m_data_pointer)\n\t\tfree();\n\n\tsize_t buffer_size = type_size_override != 0 ? (type_size_override * new_element_count) : (sizeof(T) * new_element_count);\n\tOROCHI_CHECK_ERROR(oroHostMalloc(reinterpret_cast<oroDeviceptr*>(&m_data_pointer), buffer_size, 0));\n\n\tm_element_count = new_element_count;\n\tm_pinned_memory = true;\n}\n\ntemplate <typename T>\nsize_t OrochiBuffer<T>::size() const\n{\n\treturn m_element_count;\n}\n\ntemplate <typename T>\nsize_t OrochiBuffer<T>::get_byte_size() const\n{\n\treturn m_element_count * sizeof(T);\n}\n\ntemplate <typename T>\nconst T* OrochiBuffer<T>::get_device_pointer() const\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the device_pointer of an OrochiBuffer that hasn't been allocated!\");\n\n\t\treturn nullptr;\n\t}\n\n\tif (m_pinned_memory)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the device_pointer of an OrochiBuffer that has been allocated with host pinned memory. Pinned host memory doesn't have device pointers. 
Use get_host_pinned_pointer()\");\n\n\t\treturn nullptr;\n\t}\n\telse\n\t\treturn m_data_pointer;\n}\n\ntemplate <typename T>\nT* OrochiBuffer<T>::get_device_pointer()\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the device_pointer of an OrochiBuffer that hasn't been allocated!\");\n\n\t\treturn nullptr;\n\t}\n\n\tif (m_pinned_memory)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the device_pointer of an OrochiBuffer that has been allocated with host pinned memory. Pinned host memory doesn't have device pointers. Use get_host_pinned_pointer()\");\n\n\t\treturn nullptr;\n\t}\n\telse\n\t\treturn m_data_pointer;\n}\n\ntemplate <typename T>\nconst AtomicType<T>* OrochiBuffer<T>::get_atomic_device_pointer() const\n{\n\treturn reinterpret_cast<AtomicType<T>*>(get_device_pointer());\n}\n\ntemplate <typename T>\nAtomicType<T>* OrochiBuffer<T>::get_atomic_device_pointer()\n{\n\treturn reinterpret_cast<AtomicType<T>*>(get_device_pointer());\n}\n\ntemplate <typename T>\nconst T* OrochiBuffer<T>::get_host_pinned_pointer() const\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the host_pinned_pointer of an OrochiBuffer that hasn't been allocated!\");\n\n\t\treturn nullptr;\n\t}\n\telse if (!m_pinned_memory)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the host_pinned_pointer of an OrochiBuffer that has been allocated for the device! 
Use get_device_pointer() instead\");\n\n\t\treturn nullptr;\n\t}\n\n\treturn m_data_pointer;\n}\n\ntemplate <typename T>\nT* OrochiBuffer<T>::get_host_pinned_pointer()\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the host_pinned_pointer of an OrochiBuffer that hasn't been allocated!\");\n\n\t\treturn nullptr;\n\t}\n\telse if (!m_pinned_memory)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Getting the host_pinned_pointer of an OrochiBuffer that has been allocated for the device! Use get_device_pointer() instead\");\n\n\t\treturn nullptr;\n\t}\n\n\treturn m_data_pointer;\n}\n\n\ntemplate <typename T>\nconst T* OrochiBuffer<T>::data() const\n{\n\treturn get_device_pointer();\n}\n\ntemplate <typename T>\nT* OrochiBuffer<T>::data()\n{\n\treturn get_device_pointer();\n}\n\ntemplate <typename T>\nbool OrochiBuffer<T>::is_allocated() const\n{\n\treturn m_data_pointer != nullptr;\n}\n\n// Static function\ntemplate <typename T>\nstd::vector<T> OrochiBuffer<T>::download_data(T* device_data_pointer, size_t element_count)\n{\n\tif (!device_data_pointer)\n\t\treturn std::vector<T>();\n\n\tstd::vector<T> data(element_count);\n\n\tOROCHI_CHECK_ERROR(oroMemcpyDtoH(data.data(), reinterpret_cast<oroDeviceptr>(device_data_pointer), sizeof(T) * element_count));\n\n\treturn data;\n}\n\ntemplate <typename T>\nstd::vector<T> OrochiBuffer<T>::download_data() const\n{\n\tif (!m_data_pointer)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to download data from a non-allocated buffer!\");\n\n\t\treturn std::vector<T>();\n\t}\n\n\tstd::vector<T> data(m_element_count);\n\n\tOROCHI_CHECK_ERROR(oroMemcpyDtoH(data.data(), reinterpret_cast<oroDeviceptr>(m_data_pointer), sizeof(T) * m_element_count));\n\n\treturn data;\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::download_data_into(T* host_pointer) const\n{\n\tif 
(!m_data_pointer)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to download data into a host pinned buffer from a non-allocated buffer!\");\n\n\t\treturn;\n\t}\n\n\tOROCHI_CHECK_ERROR(oroMemcpyDtoH(host_pointer, reinterpret_cast<oroDeviceptr>(m_data_pointer), sizeof(T) * m_element_count));\n}\n\ntemplate<typename T>\ninline std::vector<T> OrochiBuffer<T>::download_data_partial(int start_element_index, int stop_element_index_excluded) const\n{\n\tif (!m_data_pointer)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to download data from a non-allocated buffer!\");\n\n\t\treturn std::vector<T>();\n\t}\n\n\tif (start_element_index == stop_element_index_excluded || stop_element_index_excluded < start_element_index)\n\t\treturn std::vector<T>();\n\n\tsize_t element_count = stop_element_index_excluded - start_element_index;\n\tstd::vector<T> data(element_count);\n\n\tOROCHI_CHECK_ERROR(oroMemcpyDtoH(data.data() + start_element_index, reinterpret_cast<oroDeviceptr>(m_data_pointer), sizeof(T) * element_count));\n\n\treturn data;\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::download_data_async(void* out, oroStream_t stream) const \n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to download data async from a non-allocated buffer!\");\n\n\t\tUtils::debugbreak();\n\n\t\treturn;\n\t}\n\n\tOROCHI_CHECK_ERROR(oroMemcpyAsync(out, m_data_pointer, m_element_count * sizeof(T), oroMemcpyDeviceToHost, stream));\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::upload_data(T* device_data_pointer, const std::vector<T>& data_to_upload, size_t element_count)\n{\n\tOrochiBuffer<T>::upload_data(device_data_pointer, data_to_upload.data(), element_count);\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::upload_data(T* device_data_pointer, const T* data_to_upload, size_t element_count)\n{\n\tif (device_data_pointer && 
data_to_upload)\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(device_data_pointer), data_to_upload, sizeof(T) * element_count, oroMemcpyHostToDevice));\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::upload_data(const std::vector<T>& data)\n{\n\tif (m_data_pointer)\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(m_data_pointer), data.data(), sizeof(T) * hippt::min(data.size(), m_element_count), oroMemcpyHostToDevice));\n\telse\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to upload data to an OrochiBuffer that hasn't been allocated yet!\");\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::upload_data(const T* data)\n{\n\tif (m_data_pointer)\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(m_data_pointer), data, sizeof(T) * m_element_count, oroMemcpyHostToDevice));\n\telse\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to upload data to an OrochiBuffer that hasn't been allocated yet!\");\n}\n\ntemplate<typename T>\ninline void OrochiBuffer<T>::upload_data_partial(int start_index, const T* data, size_t element_count)\n{\n\tif (start_index > m_element_count)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to upload partial data to an OrochiBuffer starting at in an index that is larger than the buffer's size!\");\n\n\t\treturn;\n\t}\n\n\tif (m_data_pointer)\n\t\tOROCHI_CHECK_ERROR(oroMemcpy(reinterpret_cast<oroDeviceptr>(m_data_pointer + start_index), data, sizeof(T) * element_count, oroMemcpyHostToDevice));\n\telse\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to upload partial data to an OrochiBuffer that hasn't been allocated yet!\");\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::unpack_to_GL_texture(GLuint texture, GLint texture_unit, int width, int height, DisplayTextureType texture_type)\n{\n\tglActiveTexture(texture_unit);\n\tglBindTexture(GL_TEXTURE_2D, 
texture);\n\n\t// Downloading the Orochi buffer and then uploading it back to the GPU.\n\t// Isn't that great code?\n\t//\n\t// The proper solution would be to use OpenGL Interop to copy the Orochi buffer\n\t// to the underlying array of the OpenGL texture. But it seems like OpenGL interop can only\n\t// do that for RGBA textures. But we're not stricly using RGBA textures here. The template type\n\t// could be anything really and it at least doesn't work with float3 types because float3 are RGB,\n\t// not RGBA and again, OpenGL Interop throws an error at 'oroGraphicsGLRegisterImage' for RGB\n\t// textures.\n\t//\n\t// So to fix this, we could use an RGBA OpenGL texture in place of RGB. But then, in the case of \n\t// world-space normals buffer for example, we have to convert our float3 normals to float4 to upload\n\t// to the RGBA texture. And that conversion would be expensive (and require memory)\n\t//\n\t// We could also just use float4 data all the way for the normals. We wouldn't have any conversion to do.\n\t// But we would have a conversion to perform before denoising and so the issues would be the same\n\t//\n\t// So maybe there is another solution besides the RGBA OpenGL Interop but too lazy, this is an unlikely\n\t// code path in the application anyways\n\tstd::vector<T> data = download_data();\n\tglTexImage2D(GL_TEXTURE_2D, 0, texture_type.get_gl_internal_format(), width, height, 0, texture_type.get_gl_format(), texture_type.get_gl_type(), data.data());\n\n\t//oroGraphicsResource_t graphics_resource = nullptr;\n\t//OROCHI_CHECK_ERROR(oroGraphicsGLRegisterImage(&graphics_resource, texture, GL_TEXTURE_2D, oroGraphicsRegisterFlagsWriteDiscard));\n\n\t//// Map the OpenGL texture for CUDA/HIP access\n\t//OROCHI_CHECK_ERROR(oroGraphicsMapResources(1, &graphics_resource, 0));\n\n\t//// Access the CUDA/HIP array used by the OpenGL texture under the hood\n\t//oroArray_t array = nullptr;\n\t//\tOROCHI_CHECK_ERROR(oroGraphicsSubResourceGetMappedArray(&array, 
graphics_resource, 0, 0));\n\n\t//// Copy data from the CUDA buffer to the CUDA array\n\t//\tOROCHI_CHECK_ERROR(oroMemcpy2DToArray(array, 0, 0, m_data_pointer, width * texture_type.sizeof_type(), width * texture_type.sizeof_type(), height, oroMemcpyDeviceToDevice));\n\n\t//// Unmap the OpenGL texture\n\t//OROCHI_CHECK_ERROR(oroGraphicsUnmapResources(1, &graphics_resource, 0));\n}\n\ntemplate<typename T>\ninline void OrochiBuffer<T>::memcpy_from(const OrochiBuffer<T>& other)\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to memcpy_from() into an OrochiBuffer that hasn't been allocated yet!\");\n\n\t\treturn;\n\t}\n\n\tsize_t size_to_copy = std::min(other.m_element_count, m_element_count);\n\tOROCHI_CHECK_ERROR(oroMemcpy(m_data_pointer, other.get_device_pointer(), size_to_copy * sizeof(T), oroMemcpyDeviceToDevice));\n}\n\ntemplate<typename T>\ninline void OrochiBuffer<T>::memcpy_from(T* data_source, size_t element_count_to_copy)\n{\n\tif (m_data_pointer == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to memcpy_from() into an OrochiBuffer that hasn't been allocated yet!\");\n\n\t\treturn;\n\t}\n\n\tOROCHI_CHECK_ERROR(oroMemcpy(m_data_pointer, data_source, element_count_to_copy * sizeof(T), oroMemcpyDeviceToDevice));\n}\n\ntemplate <typename T>\nvoid OrochiBuffer<T>::free()\n{\n\tif (m_data_pointer)\n\t{\n\t\tif (m_pinned_memory)\n\t\t\tOROCHI_CHECK_ERROR(oroHostFree(reinterpret_cast<oroDeviceptr>(m_data_pointer)));\n\t\telse\n\t\t\tOROCHI_CHECK_ERROR(oroFree(reinterpret_cast<oroDeviceptr>(m_data_pointer)));\n\t}\n\telse\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Freeing an OrochiBuffer buffer that hasn't been initialized (or has been freed already)!\");\n\n\t\treturn;\n\t}\n\n\tm_element_count = 0;\n\tm_data_pointer = nullptr;\n\tm_pinned_memory = false;\n}\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiEnvmap.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"HIPRT-Orochi/OrochiEnvmap.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nOrochiEnvmap::OrochiEnvmap(Image32Bit& image) : OrochiTexture(image) {}\n\nOrochiEnvmap::OrochiEnvmap(OrochiEnvmap&& other) noexcept : OrochiTexture(std::move(other))\n{\n\tm_cdf = std::move(other.m_cdf);\n}\n\nvoid OrochiEnvmap::operator=(OrochiEnvmap&& other) noexcept\n{\n\tOrochiTexture::operator=(std::move(other));\n\n\tm_cdf = std::move(other.m_cdf);\n}\n\nvoid OrochiEnvmap::init_from_image(const Image32Bit& image)\n{\n\tOrochiTexture::init_from_image(image);\n}\n\nvoid OrochiEnvmap::compute_cdf(const Image32Bit& image)\n{\n\tstd::vector<float> cdf = image.compute_cdf();\n\t// When computing the CDF, the total sum is actually the last element. Handy.\n\tm_luminance_total_sum = cdf.back();\n\n\tm_cdf.resize(width * height);\n\tm_cdf.upload_data(cdf.data());\n}\n\nfloat* OrochiEnvmap::get_cdf_device_pointer()\n{\n\tif (m_cdf.size() == 0)\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to get the CDF of an OrochiEnvmap whose CDF wasn't computed in the first place...\");\n\n\treturn m_cdf.get_device_pointer();\n}\n\nvoid OrochiEnvmap::free_cdf()\n{\n\tm_cdf.free();\n}\n\nvoid OrochiEnvmap::compute_alias_table(const Image32Bit& image)\n{\n\tstd::vector<float> probas;\n\tstd::vector<int> alias;\n\timage.compute_alias_table(probas, alias, &m_luminance_total_sum);\n\n\tm_alias_table_probas.resize(width * height);\n\tm_alias_table_alias.resize(width * height);\n\n\tm_alias_table_probas.upload_data(probas.data());\n\tm_alias_table_alias.upload_data(alias.data());\n}\n\nvoid OrochiEnvmap::get_alias_table_device_pointers(float*& probas, int*& aliases)\n{\n\tprobas = m_alias_table_probas.get_device_pointer();\n\taliases = m_alias_table_alias.get_device_pointer();\n}\n\nvoid 
OrochiEnvmap::free_alias_table()\n{\n\tm_alias_table_probas.free();\n\tm_alias_table_alias.free();\n}\n\nfloat OrochiEnvmap::get_luminance_total_sum() const\n{\n\treturn m_luminance_total_sum;\n}\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiEnvmap.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OROCHI_ENVMAP_H\n#define OROCHI_ENVMAP_H\n\n#include \"HIPRT-Orochi/OrochiTexture.h\"\n\nclass OrochiEnvmap : public OrochiTexture\n{\npublic:\n\tOrochiEnvmap() : OrochiTexture() {}\n\tOrochiEnvmap(Image32Bit& image);\n\tOrochiEnvmap(const OrochiEnvmap& other) = delete;\n\tOrochiEnvmap(OrochiEnvmap&& other) noexcept;\n\n\tvoid operator=(const OrochiEnvmap other) = delete;\n\tvoid operator=(const OrochiEnvmap& other) = delete;\n\tvoid operator=(OrochiEnvmap&& other) noexcept;\n\n\tvoid init_from_image(const Image32Bit& image);\n\n\tvoid compute_cdf(const Image32Bit& image);\n\tfloat* get_cdf_device_pointer();\n\tvoid free_cdf();\n\n\tvoid compute_alias_table(const Image32Bit& image);\n\tvoid get_alias_table_device_pointers(float*& probas, int*& aliases);\n\tvoid free_alias_table();\n\n\t/**\n\t * Returns the sum of the luminance of all the texels of the envmap.\n\t * This value is not computed by this function but is computed by compute_cdf()\n\t * and compute_alias_table() so one of these two functions must be\n\t * called before calling 'get_luminance_total_sum' or 'get_luminance_total_sum'\n\t * will return 0.0f\n\t */\n\tfloat get_luminance_total_sum() const;\n\nprivate:\n\tfloat m_luminance_total_sum = 0.0f;\n\n\tOrochiBuffer<float> m_cdf;\n\n\tOrochiBuffer<float> m_alias_table_probas;\n\tOrochiBuffer<int> m_alias_table_alias;\n};\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiTexture.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"HIPRT-Orochi/OrochiTexture.h\"\n\n#include <Orochi/Orochi.h>\n\nOrochiTexture::OrochiTexture(const Image8Bit& image, hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode)\n{\n\tinit_from_image(image, filtering_mode, address_mode);\n}\n\nOrochiTexture::OrochiTexture(const Image32Bit& image, hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode)\n{\n\tinit_from_image(image, filtering_mode, address_mode);\n}\n\nOrochiTexture::OrochiTexture(OrochiTexture&& other) noexcept\n{\n\tm_texture_array = std::move(other.m_texture_array);\n\tm_texture = std::move(other.m_texture);\n\n\tother.m_texture = nullptr;\n\tother.m_texture_array = nullptr;\n}\n\nOrochiTexture::~OrochiTexture()\n{\n\tif (m_texture)\n\t\toroDestroyTextureObject(m_texture);\n\n\tif (m_texture_array)\n\t\toroFree(m_texture_array);\n}\n\nvoid OrochiTexture::operator=(OrochiTexture&& other) noexcept\n{\n\tm_texture_array = std::move(other.m_texture_array);\n\tm_texture = std::move(other.m_texture);\n\n\tother.m_texture = nullptr;\n\tother.m_texture_array = nullptr;\n}\n\nvoid create_texture_from_array_cuda(void* m_texture_array, void* m_texture, void* filtering_mode, void* address_mode, bool read_mode_float_normalized);\n\nvoid OrochiTexture::create_texture_from_array(hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode, bool read_mode_float_normalized)\n{\n#ifndef OROCHI_ENABLE_CUEW\n\t// Using native HIP here to access 'normalizedCoords' which isn't exposed by Orochi\n\n\thipResourceDesc resource_descriptor = {};\n\tresource_descriptor.resType = hipResourceTypeArray;\n\tresource_descriptor.res.array.array = m_texture_array;\n\n\thipTextureDesc texture_descriptor = {};\n\ttexture_descriptor.addressMode[0] = address_mode;\n\ttexture_descriptor.addressMode[1] = 
address_mode;\n\ttexture_descriptor.addressMode[2] = address_mode;\n\ttexture_descriptor.filterMode = filtering_mode;\n\ttexture_descriptor.normalizedCoords = true;\n\ttexture_descriptor.readMode = read_mode_float_normalized ? hipTextureReadMode::hipReadModeNormalizedFloat : hipTextureReadMode::hipReadModeElementType;\n\ttexture_descriptor.sRGB = false;\n\n\tOROCHI_CHECK_ERROR(hipCreateTextureObject(&m_texture, &resource_descriptor, &texture_descriptor, nullptr));\n#else\n\t// Using native CUDA here to access 'normalizedCoords' which isn't  exposed by Orochi\n\t// Note that this function is defined in another compile unit because we need to include CUDA headers\n\t// and they conflict with HIP headers (structures redefinition, float2, float4, ...) it seems so we need to separate them\n\tcreate_texture_from_array_cuda(m_texture_array, &m_texture, &filtering_mode, &address_mode, read_mode_float_normalized);\n#endif\n}\n\nvoid OrochiTexture::init_from_image(const Image8Bit& image, hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode)\n{\n\tint channels = image.channels;\n\tif (channels == 3 || channels > 4)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"3-channels textures not supported on the GPU yet.\");\n\n\t\treturn;\n\t}\n\n\twidth = image.width;\n\theight = image.height;\n\n\tif (width == 0 || height == 0)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Image given to OrochiTexture is 0 in width or height\");\n\n\t\tUtils::debugbreak();\n\t}\n\n\tint bits_channel_x = (channels >= 1) ? 8 : 0; // First channel (e.g., Red)\n\tint bits_channel_y = (channels >= 2) ? 8 : 0; // Second channel (e.g., Green)\n\tint bits_channel_z = (channels >= 3) ? 8 : 0; // Third channel (e.g., Blue)\n\tint bits_channel_w = (channels == 4) ? 
8 : 0; // Fourth channel (e.g., Alpha)\n\toroChannelFormatDesc channel_descriptor = oroCreateChannelDesc(bits_channel_x, bits_channel_y, bits_channel_z, bits_channel_w,\n\t\toroChannelFormatKindUnsigned);\n\tOROCHI_CHECK_ERROR(oroMallocArray(&m_texture_array, &channel_descriptor, image.width, image.height, oroArrayDefault));\n\tOROCHI_CHECK_ERROR(oroMemcpy2DToArray(m_texture_array, 0, 0, image.data().data(), \n\t\timage.width * channels * sizeof(unsigned char), \n\t\timage.width * sizeof(unsigned char) * channels, \n\t\timage.height, oroMemcpyHostToDevice));\n\t\n\tcreate_texture_from_array(filtering_mode, address_mode, true);\n}\n\nvoid OrochiTexture::init_from_image(const Image32Bit& image, hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode)\n{\n\tint channels = image.channels;\n\tif (channels == 3 || channels > 4)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"3-channels textures not supported on the GPU yet.\");\n\t\t\n\t\treturn;\n\t}\n\n\twidth = image.width;\n\theight = image.height;\n\n\tif (width == 0 || height == 0)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Image given to OrochiTexture is 0 in width or height\");\n\n\t\tUtils::debugbreak();\n\t}\n\n\tint bits_channel_x = (channels >= 1) ? 32 : 0; // First channel (e.g., Red)\n\tint bits_channel_y = (channels >= 2) ? 32 : 0; // Second channel (e.g., Green)\n\tint bits_channel_z = (channels >= 3) ? 32 : 0; // Third channel (e.g., Blue)\n\tint bits_channel_w = (channels == 4) ? 
32 : 0; // Fourth channel (e.g., Alpha)\n\toroChannelFormatDesc channel_descriptor = oroCreateChannelDesc(bits_channel_x, bits_channel_y, bits_channel_z, bits_channel_w,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t   oroChannelFormatKindFloat);\n\n\tOROCHI_CHECK_ERROR(oroMallocArray(&m_texture_array, &channel_descriptor, image.width, image.height, oroArrayDefault));\n\tOROCHI_CHECK_ERROR(oroMemcpy2DToArray(m_texture_array, 0, 0, image.data().data(), \n\t\timage.width * channels * sizeof(float),\n\t\timage.width * sizeof(float) * channels, \n\t\timage.height, oroMemcpyHostToDevice));\n\n\tcreate_texture_from_array(filtering_mode, address_mode, false);\n}\n\noroTextureObject_t OrochiTexture::get_device_texture()\n{\n\treturn m_texture;\n}\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiTexture.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OROCHI_TEXTURE_H\n#define OROCHI_TEXTURE_H\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"Image/Image.h\"\n\nclass OrochiTexture\n{\npublic:\n\tOrochiTexture() {}\n\tOrochiTexture(const Image8Bit& image, hipTextureFilterMode filtering_mode = hipFilterModePoint, hipTextureAddressMode address_mode = hipAddressModeWrap);\n\tOrochiTexture(const Image32Bit& image, hipTextureFilterMode filtering_mode = hipFilterModePoint, hipTextureAddressMode address_mode = hipAddressModeWrap);\n\tOrochiTexture(const OrochiTexture& other) = delete;\n\tOrochiTexture(OrochiTexture&& other) noexcept;\n\t~OrochiTexture();\n\n\tvoid operator=(const OrochiTexture& other) = delete;\n\tvoid operator=(OrochiTexture&& other) noexcept;\n\n\tvoid init_from_image(const Image8Bit& image, hipTextureFilterMode filtering_mode = hipFilterModePoint, hipTextureAddressMode address_mode = hipAddressModeWrap);\n\tvoid init_from_image(const Image32Bit& image, hipTextureFilterMode filtering_mode = hipFilterModePoint, hipTextureAddressMode address_mode = hipAddressModeWrap);\n\n\toroTextureObject_t get_device_texture();\n\n\tunsigned int width = 0, height = 0;\n\nprivate:\n\n\tvoid create_texture_from_array(hipTextureFilterMode filtering_mode, hipTextureAddressMode address_mode, bool read_mode_float_normalized);\n\n\toroArray_t m_texture_array = nullptr;\n\n\toroTextureObject_t m_texture = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiTexture3D.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"HIPRT-Orochi/OrochiTexture3D.h\"\n\n#include <Orochi/Orochi.h>\n\nOrochiTexture3D::OrochiTexture3D(const std::vector<Image8Bit>& images, HIPfilter_mode filtering_mode, HIPaddress_mode address_mode)\n{\n\tinit_from_images(images, filtering_mode, address_mode);\n}\n\nOrochiTexture3D::OrochiTexture3D(const std::vector<Image32Bit>& images, HIPfilter_mode filtering_mode, HIPaddress_mode address_mode)\n{\n\tinit_from_images(images, filtering_mode, address_mode);\n}\n\nOrochiTexture3D::OrochiTexture3D(OrochiTexture3D&& other) noexcept\n{\n\tm_texture_array = std::move(other.m_texture_array);\n\tm_texture = std::move(other.m_texture);\n\n\tother.m_texture = nullptr;\n\tother.m_texture_array = nullptr;\n}\n\nOrochiTexture3D::~OrochiTexture3D()\n{\n\tif (m_texture)\n\t\toroDestroyTextureObject(m_texture);\n\n\tif (m_texture_array)\n\t\toroFree(m_texture_array);\n}\n\nvoid OrochiTexture3D::operator=(OrochiTexture3D&& other) noexcept\n{\n\tm_texture_array = std::move(other.m_texture_array);\n\tm_texture = std::move(other.m_texture);\n\n\tother.m_texture = nullptr;\n\tother.m_texture_array = nullptr;\n}\n\nvoid OrochiTexture3D::init_from_images(const std::vector<Image8Bit>& images, HIPfilter_mode filtering_mode, HIPaddress_mode address_mode)\n{\n\tint channels = images[0].channels;\n\tif (channels == 3 || channels > 4)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"3-channels textures not supported on the GPU yet.\");\n\n\t\treturn;\n\t}\n\n\twidth = images[0].width;\n\theight = images[0].height;\n\tdepth = images.size();\n\n\tint bits_channel_x = (channels >= 1) ? 8 : 0; // First channel (e.g., Red)\n\tint bits_channel_y = (channels >= 2) ? 8 : 0; // Second channel (e.g., Green)\n\tint bits_channel_z = (channels >= 3) ? 
8 : 0; // Third channel (e.g., Blue)\n\tint bits_channel_w = (channels == 4) ? 8 : 0; // Fourth channel (e.g., Alpha)\n\toroChannelFormatDesc channel_descriptor = oroCreateChannelDesc(bits_channel_x, bits_channel_y, bits_channel_z, bits_channel_w,\n\t\toroChannelFormatKindUnsigned);\n\n\tOROCHI_CHECK_ERROR(oroMalloc3DArray(&m_texture_array, &channel_descriptor, oroExtent{ width, height, depth }, oroArrayDefault));\n\n\t// Because we're copying to a CUDA/HIP array, we need the input data\n\t// to be in a single linear block of data\n\tstd::vector<float> linear_image_data(width * height * depth);\n\tfor (int i = 0; i < images.size(); i++)\n\t\tstd::copy(images[i].data().begin(), images[i].data().end(), linear_image_data.begin() + width * height * i);\n\n\toroMemcpy3DParms copyParams = { 0 };\n\tcopyParams.dstArray = m_texture_array;\n\tcopyParams.extent = { width, height, depth };\n\tcopyParams.kind = oroMemcpyHostToDevice;\n\tcopyParams.srcPtr = oroPitchedPtr{ linear_image_data.data(), width, width, height };\n\tOROCHI_CHECK_ERROR(oroMemcpy3D(&copyParams));\n\n\t// Resource descriptor\n\tORO_RESOURCE_DESC resource_descriptor;\n\tstd::memset(&resource_descriptor, 0, sizeof(resource_descriptor));\n\tresource_descriptor.resType = ORO_RESOURCE_TYPE_ARRAY;\n\tresource_descriptor.res.array.hArray = m_texture_array;\n\n\tORO_TEXTURE_DESC texture_descriptor;\n\tstd::memset(&texture_descriptor, 0, sizeof(texture_descriptor));\n\ttexture_descriptor.addressMode[0] = address_mode;\n\ttexture_descriptor.addressMode[1] = address_mode;\n\ttexture_descriptor.addressMode[2] = address_mode;\n\ttexture_descriptor.filterMode = filtering_mode;\n\n\tOROCHI_CHECK_ERROR(oroTexObjectCreate(&m_texture, &resource_descriptor, &texture_descriptor, nullptr));\n}\n\nvoid OrochiTexture3D::init_from_images(const std::vector<Image32Bit>& images, HIPfilter_mode filtering_mode, HIPaddress_mode address_mode)\n{\n\tint channels = images[0].channels;\n\tif (channels == 3 || channels > 
4)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"3-channels textures not supported on the GPU yet.\");\n\n\t\treturn;\n\t}\n\n\twidth = images[0].width;\n\theight = images[0].height;\n\tdepth = images.size();\n\n\tint bits_channel_x = (channels >= 1) ? 32 : 0; // First channel (e.g., Red)\n\tint bits_channel_y = (channels >= 2) ? 32 : 0; // Second channel (e.g., Green)\n\tint bits_channel_z = (channels >= 3) ? 32 : 0; // Third channel (e.g., Blue)\n\tint bits_channel_w = (channels == 4) ? 32 : 0; // Fourth channel (e.g., Alpha)\n\toroChannelFormatDesc channel_descriptor = oroCreateChannelDesc(bits_channel_x, bits_channel_y, bits_channel_z, bits_channel_w,\n\t\toroChannelFormatKindFloat);\n\n\tOROCHI_CHECK_ERROR(oroMalloc3DArray(&m_texture_array, &channel_descriptor, oroExtent{ width, height, depth }, oroArrayDefault));\n\n\t// Because we're copying to a CUDA/HIP array, we need the input data\n\t// to be in a single linear block of data\n\tstd::vector<float> linear_image_data(width * height * depth * channels);\n\tfor (int i = 0; i < images.size(); i++)\n\t\tstd::copy(images[i].data().begin(), images[i].data().end(), linear_image_data.begin() + width * height * i * channels);\n\n\toroMemcpy3DParms copyParams = { 0 };\n\tcopyParams.srcPtr = oroPitchedPtr{ linear_image_data.data(), width * channels * sizeof(float), width * channels, height};\n\tcopyParams.dstArray = m_texture_array;\n\tcopyParams.extent = { width, height, depth };\n\tcopyParams.kind = oroMemcpyHostToDevice;\n\tOROCHI_CHECK_ERROR(oroMemcpy3D(&copyParams));\n\n\t// Resource descriptor\n\tORO_RESOURCE_DESC resource_descriptor;\n\tstd::memset(&resource_descriptor, 0, sizeof(resource_descriptor));\n\tresource_descriptor.resType = ORO_RESOURCE_TYPE_ARRAY;\n\tresource_descriptor.res.array.hArray = m_texture_array;\n\n\tORO_TEXTURE_DESC texture_descriptor;\n\tstd::memset(&texture_descriptor, 0, sizeof(texture_descriptor));\n\ttexture_descriptor.addressMode[0] = 
address_mode;\n\ttexture_descriptor.addressMode[1] = address_mode;\n\ttexture_descriptor.addressMode[2] = address_mode;\n\ttexture_descriptor.filterMode = filtering_mode;\n\n\tOROCHI_CHECK_ERROR(oroTexObjectCreate(&m_texture, &resource_descriptor, &texture_descriptor, nullptr));\n}\n\noroTextureObject_t OrochiTexture3D::get_device_texture()\n{\n\treturn m_texture;\n}\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiTexture3D.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OROCHI_TEXTURE_3D_H\n#define OROCHI_TEXTURE_3D_H\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"Image/Image.h\"\n\n#include <vector>\n\nclass OrochiTexture3D\n{\npublic:\n\tOrochiTexture3D() {}\n\tOrochiTexture3D(const std::vector<Image8Bit>& image, HIPfilter_mode filtering_mode = ORO_TR_FILTER_MODE_POINT, HIPaddress_mode address_mode = ORO_TR_ADDRESS_MODE_WRAP);\n\tOrochiTexture3D(const std::vector<Image32Bit>& image, HIPfilter_mode filtering_mode = ORO_TR_FILTER_MODE_POINT, HIPaddress_mode address_mode = ORO_TR_ADDRESS_MODE_WRAP);\n\tOrochiTexture3D(const OrochiTexture3D& other) = delete;\n\tOrochiTexture3D(OrochiTexture3D&& other) noexcept;\n\t~OrochiTexture3D();\n\n\tvoid operator=(const OrochiTexture3D& other) = delete;\n\tvoid operator=(OrochiTexture3D&& other) noexcept;\n\n\tvoid init_from_images(const std::vector<Image8Bit>& images, HIPfilter_mode filtering_mode = ORO_TR_FILTER_MODE_POINT, HIPaddress_mode address_mode = ORO_TR_ADDRESS_MODE_WRAP);\n\tvoid init_from_images(const std::vector<Image32Bit>& images, HIPfilter_mode filtering_mode = ORO_TR_FILTER_MODE_POINT, HIPaddress_mode address_mode = ORO_TR_ADDRESS_MODE_WRAP);\n\n\toroTextureObject_t get_device_texture();\n\n\tunsigned int width = 0, height = 0, depth = 0;\n\nprivate:\n\toroArray_t m_texture_array = nullptr;\n\n\toroTextureObject_t m_texture = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HIPRT-Orochi/OrochiTextureCUDA.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifdef OROCHI_ENABLE_CUEW\n\n#include \"cuda_runtime_api.h\"\n#include \"contrib/cuew/include/cuew.h\"\n\n#include \"Utils/Utils.h\"\n\nvoid create_texture_from_array_cuda(void* m_texture_array, void* m_texture, void* filtering_mode, void* address_mode, bool read_mode_float_normalized)\n{\n\t// Resource descriptor\n\tcudaResourceDesc resource_descriptor = {};\n\tresource_descriptor.resType = cudaResourceTypeArray;\n\tresource_descriptor.res.array.array = reinterpret_cast<cudaArray_t>(m_texture_array);\n\n\tcudaTextureDesc texture_descriptor = {};\n\ttexture_descriptor.addressMode[0] = *reinterpret_cast<cudaTextureAddressMode*>(address_mode);\n\ttexture_descriptor.addressMode[1] = *reinterpret_cast<cudaTextureAddressMode*>(address_mode);\n\ttexture_descriptor.addressMode[2] = *reinterpret_cast<cudaTextureAddressMode*>(address_mode);\n\ttexture_descriptor.filterMode = *reinterpret_cast<cudaTextureFilterMode*>(filtering_mode);\n\ttexture_descriptor.normalizedCoords = true;\n\ttexture_descriptor.readMode = read_mode_float_normalized ? cudaTextureReadMode::cudaReadModeNormalizedFloat : cudaTextureReadMode::cudaReadModeElementType;\n\ttexture_descriptor.sRGB = false;\n\n\tcudaError_t error = cudaCreateTextureObject_oro(reinterpret_cast<cudaTextureObject_t*>(m_texture), &resource_descriptor, &texture_descriptor, nullptr);\n\tif (error != cudaError::cudaSuccess)\n\t\tUtils::debugbreak();\n}\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/AtomicType.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_ATOMIC_TYPE_H\n#define HOST_DEVICE_COMMON_ATOMIC_TYPE_H\n\n#ifdef __KERNELCC__\ntemplate <typename T>\nusing AtomicType = T;\n#else\n#include <atomic>\n\ntemplate <typename T>\nusing AtomicType = std::atomic<T>;\n#endif\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/BSDFsData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_BSDFS_DATA_H\n#define HOST_DEVICE_COMMON_BSDFS_DATA_H\n\n#include \"HostDeviceCommon/MicrofacetRegularizationSettings.h\"\n\n /**\n  * What masking-shadowing term to use with the GGX NDF.\n  *\n  * 'HeightCorrelated' is a little be more precise and\n  * corect than 'HeightUncorrelated' so it should basically\n  * always be preferred.\n  * \n  * This is basically only for experimentation purposes\n  */\nenum GGXMaskingShadowingFlavor\n{\n\tHeightCorrelated,\n\tHeightUncorrelated\n};\n\nstruct BRDFsData\n{\n\tbool white_furnace_mode = false;\n\tbool white_furnace_mode_turn_off_emissives = true;\n\n\t// 32x32 texture containing the precomputed parameters of the LTC\n\t// fitted to approximate the SSGX sheen volumetric layer.\n\t// See SheenLTCFittedParameters.h\n\tvoid* sheen_ltc_parameters_texture = nullptr;\n\n\t// 2D texture for the precomputed directional albedo\n\t// for the GGX BRDFs used in the principled BSDF for energy compensation\n\t// of conductors\n\tvoid* GGX_conductor_directional_albedo = nullptr;\n\n\t// 3D texture for the precomputed directional albedo of the base layer\n\t// of the principled BSDF (specular GGX layer + diffuse below)\n\tvoid* glossy_dielectric_directional_albedo = nullptr;\n\n\t// 3D texture (cos_theta_o, roughness, relative_eta) for the precomputed\n\t// directional albedo used for energy compensation of glass objects when\n\t// entering a medium\n\tvoid* GGX_glass_directional_albedo = nullptr;\n\t// Table when leaving a medium\n\tvoid* GGX_glass_directional_albedo_inverse = nullptr;\n\n\t// Table for energy compesantion of thin walled glass\n\t// Fetching into this table should use the base roughness\n\t// of the material i.e. 
**not** the remapped thin-walled roughness\n\tvoid* GGX_thin_glass_directional_albedo = nullptr;\n\n\t// Whether or not to use the texture unit's hardware texel interpolation\n\t// when fetching the LUTs. It's faster but less precise.\n\tbool use_hardware_tex_interpolation = false;\n\n\tGGXMaskingShadowingFlavor GGX_masking_shadowing = GGXMaskingShadowingFlavor::HeightUncorrelated;\n\n\tfloat energy_compensation_roughness_threshold = 0.0f;\n\n\t// After how many bounces to stop doing energy compensation to save performance?\n\t// \n\t// For example, 0 means that energy compensation will only be done on the first hit and\n\t// not later\n\t//\n\t// -1 to disable\n\tint glass_energy_compensation_max_bounce = -1;\n\tint metal_energy_compensation_max_bounce = -1;\n\tint clearcoat_energy_compensation_max_bounce = -1;\n\tint glossy_base_energy_compensation_max_bounce = -1;\n\n\tMicrofacetRegularizationSettings microfacet_regularization;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Color.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_COLOR_H\n#define HOST_DEVICE_COMMON_COLOR_H\n\n#include \"Device/includes/Hash.h\"\n#include \"HostDeviceCommon/Math.h\"\n\nstruct ColorRGBA32F\n{\n    HIPRT_HOST_DEVICE ColorRGBA32F() { r = 0.0f; g = 0.0f; b = 0.0f; a = 1.0f; }\n    HIPRT_HOST_DEVICE explicit ColorRGBA32F(float value) { r = value; g = value; b = value; a = 1.0f; }\n    HIPRT_HOST_DEVICE ColorRGBA32F(float _r, float _g, float _b, float _a) { r = _r; g = _g; b = _b; a = _a; }\n    HIPRT_HOST_DEVICE explicit ColorRGBA32F(float4 vec) { r = vec.x; g = vec.y; b = vec.z; a = vec.w; }\n\n    HIPRT_HOST_DEVICE void operator+=(const ColorRGBA32F& other) { r += other.r; g += other.g; b += other.b; a += other.a; }\n    HIPRT_HOST_DEVICE void operator-=(const ColorRGBA32F& other) { r -= other.r; g -= other.g; b -= other.b; a -= other.a; }\n    HIPRT_HOST_DEVICE void operator*=(const ColorRGBA32F& other) { r *= other.r; g *= other.g; b *= other.b; a *= other.a; }\n    HIPRT_HOST_DEVICE void operator*=(float k) { r *= k; g *= k; b *= k; a *= k; }\n    HIPRT_HOST_DEVICE void operator/=(const ColorRGBA32F& other) { r /= other.r; g /= other.g; b /= other.b; a /= other.a; }\n    HIPRT_HOST_DEVICE void operator/=(float k) { r /= k; g /= k; b /= k; a /= k; }\n    HIPRT_HOST_DEVICE bool operator!=(const ColorRGBA32F& other) { return r != other.r || g != other.g || b != other.g || a != other.a; }\n\n    HIPRT_HOST_DEVICE float length() const { return sqrtf(this->length2()); }\n    HIPRT_HOST_DEVICE float length2() const { return r * r + g * g + b * b + a * a; }\n    HIPRT_HOST_DEVICE float luminance() const { return 0.3086f * r + 0.6094f * g + 0.0820f * b; }\n    HIPRT_HOST_DEVICE void clamp(float min, float max) { r = hippt::clamp(min, max, r); g = hippt::clamp(min, max, g); b = hippt::clamp(min, max, b); a = hippt::clamp(min, max, a); }\n    
HIPRT_HOST_DEVICE ColorRGBA32F clamped(float min, float max) { return ColorRGBA32F(hippt::clamp(min, max, r), g = hippt::clamp(min, max, g), b = hippt::clamp(min, max, b), a = hippt::clamp(min, max, a)); }\n    HIPRT_HOST_DEVICE bool has_nan() const { return hippt::is_nan(r) || hippt::is_nan(g) || hippt::is_nan(b) || hippt::is_nan(a); }\n    HIPRT_HOST_DEVICE bool has_inf() const { return hippt::is_inf(r) || hippt::is_inf(g) || hippt::is_inf(b) || hippt::is_inf(a); }\n    HIPRT_HOST_DEVICE bool is_black() const { return !(r > 0.0f || g > 0.0f || b > 0.0f); }\n    HIPRT_HOST_DEVICE bool is_white() const { return r == 1.0f && g == 1.0f && b == 1.0f; }\n\n\n    HIPRT_HOST_DEVICE float max_component() const { return hippt::max(r, hippt::max(g, hippt::max(b, a))); }\n    HIPRT_HOST_DEVICE float min_component() const { return hippt::min(r, hippt::min(g, hippt::min(b, a))); }\n    HIPRT_HOST_DEVICE ColorRGBA32F normalized() const { float length = sqrtf(r * r + g * g + b * b); return ColorRGBA32F(r / length, g / length, b / length, /* not normalizing alpha */ a); }\n\n    HIPRT_HOST_DEVICE ColorRGBA32F abs() { return ColorRGBA32F(hippt::abs(this->r), hippt::abs(this->g), hippt::abs(this->b), hippt::abs(this->a)); }\n    HIPRT_HOST_DEVICE void max(const ColorRGBA32F& maxer) { this->r = hippt::max(this->r, maxer.r); this->g = hippt::max(this->g, maxer.g); this->b = hippt::max(this->b, maxer.b); this->a = hippt::max(this->a, maxer.a); }\n    HIPRT_HOST_DEVICE static ColorRGBA32F max(const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(hippt::max(a.r, b.r), hippt::max(a.g, b.g), hippt::max(a.b, b.b), hippt::max(a.a, b.a)); }\n    HIPRT_HOST_DEVICE static ColorRGBA32F min(const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(hippt::min(a.r, b.r), hippt::min(a.g, b.g), hippt::min(a.b, b.b), hippt::min(a.a, b.a)); }\n\n    HIPRT_HOST_DEVICE float& operator[](int index) { return *(&r + index); }\n    HIPRT_HOST_DEVICE float operator[](int index) const 
{ return *(&r + index); }\n\n    float r, g, b, a;\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator+ (const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(a.r + b.r, a.g + b.g, a.b + b.b, a.a + b.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator- (const ColorRGBA32F& c) { return ColorRGBA32F(-c.r, -c.g, -c.b, c.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator- (const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(a.r - b.r, a.g - b.g, a.b - b.b, a.a - b.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator* (const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(a.r * b.r, a.g * b.g, a.b * b.b, a.a * b.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator* (const float k, const ColorRGBA32F& c) { return ColorRGBA32F(c.r * k, c.g * k, c.b * k, c.a * k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator* (const ColorRGBA32F& c, const float k) { return ColorRGBA32F(c.r * k, c.g * k, c.b * k, c.a * k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator/ (const ColorRGBA32F& a, const ColorRGBA32F& b) { return ColorRGBA32F(a.r / b.r, a.g / b.g, a.b / b.b, a.a / b.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator/ (const float k, const ColorRGBA32F& c) { return ColorRGBA32F(k / c.r, k / c.g, k / c.b, k / c.a); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F operator/ (const ColorRGBA32F& c, const float k) { return ColorRGBA32F(c.r / k, c.g / k, c.b / k, c.a / k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F sqrt(const ColorRGBA32F& col) { return ColorRGBA32F(sqrtf(col.r), sqrtf(col.g), sqrtf(col.b), sqrtf(col.a)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F exp(const ColorRGBA32F& col) { return ColorRGBA32F(expf(col.r), expf(col.g), expf(col.b), expf(col.a)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F exp2(const ColorRGBA32F& col) { return ColorRGBA32F(exp2f(col.r), exp2f(col.g), exp2f(col.b), exp2f(col.a)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F 
log(const ColorRGBA32F& col) { return ColorRGBA32F(logf(col.r), logf(col.g), logf(col.b), logf(col.a)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F pow(const ColorRGBA32F& col, float k) { return ColorRGBA32F(powf(col.r, k), powf(col.g, k), powf(col.b, k), powf(col.a, k)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGBA32F intrin_pow(ColorRGBA32F x, float y) { return ColorRGBA32F(hippt::intrin_pow(x.r, y), hippt::intrin_pow(x.g, y), hippt::intrin_pow(x.b, y), hippt::intrin_pow(x.a, y)); }\n\nstruct ColorRGB32F\n{\n    HIPRT_HOST_DEVICE ColorRGB32F() { r = 0.0f; g = 0.0f; b = 0.0f; }\n    HIPRT_HOST_DEVICE explicit ColorRGB32F(float value) { r = value; g = value; b = value; }\n    HIPRT_HOST_DEVICE ColorRGB32F(float _r, float _g, float _b) { r = _r; g = _g; b = _b; }\n    HIPRT_HOST_DEVICE explicit ColorRGB32F(float3 vec) { r = vec.x; g = vec.y; b = vec.z; }\n    // W component of float4 is dropped\n    HIPRT_HOST_DEVICE explicit ColorRGB32F(float4 vec) { r = vec.x; g = vec.y; b = vec.z; }\n    // This constructor drops the alpha channel\n    HIPRT_HOST_DEVICE explicit ColorRGB32F(const ColorRGBA32F& rgba) { r = rgba.r; g = rgba.g; b = rgba.b; }\n\n    HIPRT_HOST_DEVICE void operator+=(const ColorRGB32F& other) { r += other.r; g += other.g; b += other.b; }\n    HIPRT_HOST_DEVICE void operator-=(const ColorRGB32F& other) { r -= other.r; g -= other.g; b -= other.b; }\n    HIPRT_HOST_DEVICE void operator*=(const ColorRGB32F& other) { r *= other.r; g *= other.g; b *= other.b; }\n    HIPRT_HOST_DEVICE void operator*=(float k) { r *= k; g *= k; b *= k; }\n    HIPRT_HOST_DEVICE void operator/=(const ColorRGB32F& other) { r /= other.r; g /= other.g; b /= other.b; }\n    HIPRT_HOST_DEVICE void operator/=(float k) { r /= k; g /= k; b /= k; }\n    HIPRT_HOST_DEVICE bool operator!=(const ColorRGB32F& other) { return r != other.r || g != other.g || b != other.b; }\n\n    HIPRT_HOST_DEVICE float length() const { return sqrtf(this->length2()); }\n    HIPRT_HOST_DEVICE float 
length2() const { return r * r + g * g + b * b; }\n    HIPRT_HOST_DEVICE float luminance() const { return 0.3086f * r + 0.6094f * g + 0.0820f * b; }\n    HIPRT_HOST_DEVICE void clamp(float min, float max) { r = hippt::clamp(min, max, r); g = hippt::clamp(min, max, g); b = hippt::clamp(min, max, b); }\n    HIPRT_HOST_DEVICE ColorRGB32F clamped(float min, float max) { return ColorRGB32F(hippt::clamp(min, max, r), g = hippt::clamp(min, max, g), b = hippt::clamp(min, max, b)); }\n    HIPRT_HOST_DEVICE bool has_nan() const { return hippt::is_nan(r) || hippt::is_nan(g) || hippt::is_nan(b); }\n    HIPRT_HOST_DEVICE bool has_inf() const { return hippt::is_inf(r) || hippt::is_inf(g) || hippt::is_inf(b); }\n    HIPRT_HOST_DEVICE bool is_black() const { return !(r > 0.0f || g > 0.0f || b > 0.0f); }\n    HIPRT_HOST_DEVICE bool is_white() const { return r == 1.0f && g == 1.0f && b == 1.0f; }\n\n    HIPRT_HOST_DEVICE float max_component() const { return hippt::max(r, hippt::max(g, b)); }\n    HIPRT_HOST_DEVICE float min_component() const { return hippt::min(r, hippt::min(g, b)); }\n    HIPRT_HOST_DEVICE ColorRGB32F normalized() const { float length = sqrtf(r * r + g * g + b * b); return ColorRGB32F(r / length, g / length, b / length); }\n\n    HIPRT_HOST_DEVICE ColorRGB32F abs() { return ColorRGB32F(hippt::abs(this->r), hippt::abs(this->g), hippt::abs(this->b)); }\n    HIPRT_HOST_DEVICE void max(const ColorRGB32F& maxer) { this->r = hippt::max(this->r, maxer.r); this->g = hippt::max(this->g, maxer.g); this->b = hippt::max(this->b, maxer.b); }\n    HIPRT_HOST_DEVICE static ColorRGB32F max(const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(hippt::max(a.r, b.r), hippt::max(a.g, b.g), hippt::max(a.b, b.b)); }\n    HIPRT_HOST_DEVICE static ColorRGB32F min(const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(hippt::min(a.r, b.r), hippt::min(a.g, b.g), hippt::min(a.b, b.b)); }\n\n    HIPRT_HOST_DEVICE float& operator[](int index) { return *(&r + index); }\n    
HIPRT_HOST_DEVICE float operator[](int index) const { return *(&r + index); }\n\n    HIPRT_HOST_DEVICE static ColorRGB32F random_color(unsigned int seed) \n    {\n        constexpr unsigned int UNSIGNED_INT_MAX = 0xffffffff;\n\n        unsigned int seed1 = wang_hash(seed);\n        unsigned int seed2 = wang_hash(seed1);\n        unsigned int seed3 = wang_hash(seed2);\n\n        return ColorRGB32F(seed1 / static_cast<float>(UNSIGNED_INT_MAX), seed2 / static_cast<float>(UNSIGNED_INT_MAX), seed3 / static_cast<float>(UNSIGNED_INT_MAX));\n    }\n\n    float r, g, b;\n};\n\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator+ (const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(a.r + b.r, a.g + b.g, a.b + b.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator- (const ColorRGB32F& c) { return ColorRGB32F(-c.r, -c.g, -c.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator- (const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(a.r - b.r, a.g - b.g, a.b - b.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator* (const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(a.r * b.r, a.g * b.g, a.b * b.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator* (const float k, const ColorRGB32F& c) { return ColorRGB32F(c.r * k, c.g * k, c.b * k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator* (const ColorRGB32F& c, const float k) { return ColorRGB32F(c.r * k, c.g * k, c.b * k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator/ (const ColorRGB32F& a, const ColorRGB32F& b) { return ColorRGB32F(a.r / b.r, a.g / b.g, a.b / b.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator/ (const float k, const ColorRGB32F& c) { return ColorRGB32F(k / c.r, k / c.g, k / c.b); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F operator/ (const ColorRGB32F& c, const float k) { return ColorRGB32F(c.r / k, c.g / k, c.b / k); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F sqrt(const ColorRGB32F& col) { return ColorRGB32F(sqrtf(col.r), 
sqrtf(col.g), sqrtf(col.b)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F exp(const ColorRGB32F& col) { return ColorRGB32F(expf(col.r), expf(col.g), expf(col.b)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F exp2(const ColorRGB32F& col) { return ColorRGB32F(exp2f(col.r), exp2f(col.g), exp2f(col.b)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F log(const ColorRGB32F& col) { return ColorRGB32F(logf(col.r), logf(col.g), logf(col.b)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F pow(const ColorRGB32F& col, float k) { return ColorRGB32F(powf(col.r, k), powf(col.g, k), powf(col.b, k)); }\nHIPRT_HOST_DEVICE HIPRT_INLINE ColorRGB32F intrin_pow(ColorRGB32F x, float y) { return ColorRGB32F(hippt::intrin_pow(x.r, y), hippt::intrin_pow(x.g, y), hippt::intrin_pow(x.b, y)); }\n\n#ifndef __KERNELCC__\ninline std::ostream& operator <<(std::ostream& os, const ColorRGB32F& color)\n{\n    os << color.r << \", \" << color.g << \", \" << color.b;\n\n    return os;\n}\n\ninline std::ostream& operator <<(std::ostream& os, const ColorRGBA32F& color)\n{\n    os << color.r << \", \" << color.g << \", \" << color.b << \", \" << color.a;\n\n    return os;\n}\n#endif\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/HIPRTCamera.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_HIPRT_CAMERA_H\n#define HOST_DEVICE_COMMON_HIPRT_CAMERA_H\n\n#include \"HostDeviceCommon/Math.h\"\n\n#include <hiprt/hiprt_types.h> // for hiprtRay\n\n/**\n * Simplified camera class passed to the shader\n */\nstruct HIPRTCamera\n{\n    float4x4 inverse_view;\n    float4x4 inverse_projection;\n    float4x4 view_projection;\n    float3 position;\n\n    float vertical_fov;\n    int sensor_width, sensor_height;\n\n    bool do_jittering = true;\n\n    /**\n     * Returns a camera ray for pixel (x, y) and the given render solution\n     */\n    HIPRT_HOST_DEVICE hiprtRay get_camera_ray(float x, float y, int2 res)\n    {\n        float x_ndc_space = x / res.x * 2 - 1;\n        float y_ndc_space = y / res.y * 2 - 1;\n\n        float3 ray_origin_view_space = { 0.0f, 0.0f, 0.0f };\n        float3 ray_origin = matrix_X_point(inverse_view, ray_origin_view_space);\n\n        // Point on the near plane\n        float3 ray_point_dir_ndc_homog = { x_ndc_space, y_ndc_space, -1.0f };\n        float3 ray_point_dir_vs_homog = matrix_X_point(inverse_projection, ray_point_dir_ndc_homog);\n        float3 ray_point_dir_vs = ray_point_dir_vs_homog;\n        float3 ray_point_dir_ws = matrix_X_point(inverse_view, ray_point_dir_vs);\n\n        float3 ray_direction = hippt::normalize(ray_point_dir_ws - ray_origin);\n\n        hiprtRay ray;\n        ray.origin = ray_origin;\n        ray.direction = ray_direction;\n\n        return ray;\n    }\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/HitInfo.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_HIT_INFO_H\n#define HOST_DEVICE_COMMON_HIT_INFO_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Math.h\"\n\nstruct HitInfo\n{\n    float3 inter_point = { 0, 0, 0 };\n    float3 shading_normal = { 0, 0, 0 };\n    float3 geometric_normal = { 0, 0, 0 };\n    // TODO is texcoords useful? This may actually be returned by the intersection function and used only for reading textures but then we don't need it anymore when evaluating the bSDF and comp�ting the main path tracing stuff so let's save some registers\n    float2 texcoords = { 0, 0 };\n\n    // Distance along ray\n    float t = -1.0f;\n\n    int primitive_index = -1;\n};\n\n/**\n * Information returned by a shadow ray cast from a BSDF sample. \n *\n * This structure is filled by the 'evaluate_bsdf_light_sample_ray()' \n * function that is usually called for testing if a BSDF ray \n * (used by MIS) sees some emissive geometry or not.\n */\nstruct BSDFLightSampleRayHitInfo\n{\n    // TODO do we use this only for the area of the light? In which case we can just store the area of the light\n    int hit_prim_index;\n    // TODO is this used?\n    int hit_material_index;\n    float hit_distance;\n\n    float2 hit_interpolated_texcoords;\n    float3 hit_shading_normal;\n    float3 hit_geometric_normal;\n\n    ColorRGB32F hit_emission;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/Common.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_KERNEL_OPTIONS_COMMON_H\n#define HOST_DEVICE_COMMON_KERNEL_OPTIONS_COMMON_H\n\n#define KERNEL_OPTION_FALSE 0\n#define KERNEL_OPTION_TRUE 1\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/DirectLightSamplingOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_DIRECT_LIGHT_SAMPLING_OPTIONS_H\n#define HOST_DEVICE_COMMON_DIRECT_LIGHT_SAMPLING_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/Common.h\"\n\n#define LSS_NO_DIRECT_LIGHT_SAMPLING 0\n#define LSS_ONE_LIGHT 1\n#define LSS_BSDF 2\n#define LSS_MIS_LIGHT_BSDF 3\n#define LSS_RIS_BSDF_AND_LIGHT 4\n#define LSS_RESTIR_DI 5\n\n#define LSS_BASE_UNIFORM 0\n#define LSS_BASE_POWER 1\n#define LSS_BASE_REGIR 2\n\n// This block is a security to make sure that we have everything defined otherwise this can lead\n// to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n /**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. This means that if the #define statement are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n/**\n* What direct lighting sampling strategy to use.\n*\n* Possible values (the prefix LSS stands for \"Light Sampling strategy\"):\n*\n*\t- LSS_NO_DIRECT_LIGHT_SAMPLING\n*\t\tNo direct light sampling. 
Emission is only gathered if rays happen to bounce into the lights.\n*\n*\t- LSS_ONE_LIGHT\n*\t\tSamples one random light in the scene without MIS.\n*\t\tEfficient as long as there are not too many lights in the scene and no glossy surfaces\n*\n*  - LSS_BSDF\n*\t\tSamples lights only using a BSDF sample\n*\t\tEfficient as long as light sources in the scene are large\n*\n*\t- LSS_MIS_LIGHT_BSDF\n*\t\tSamples one random light in the scene with MIS (Multiple Importance Sampling): light sample + BRDF sample\n*\n*\t- LSS_RIS_BSDF_AND_LIGHT\n*\t\tSamples lights in the scene with Resampled Importance Sampling\n*\n*\t- LSS_RESTIR_DI\n*\t\tUses ReSTIR DI to sample direct lighting at the first bounce in the scene.\n*\t\tLater bounces use the strategy given by ReSTIR_DI_LaterBouncesSamplingStrategy\n*/\n#define DirectLightSamplingStrategy LSS_RIS_BSDF_AND_LIGHT\n\n/**\n* How to sample lights in the scene.\n* This directly affects the 'DirectLightSamplingStrategy' strategies that sample lights\n*\n*\t- LSS_BASE_UNIFORM\n*\t\tLights are sampled uniformly\n*\n*\t- LSS_BASE_POWER\n*\t\tLights are sampled proportionally to their power\n*\n*\t- LSS_BASE_REGIR\n*\t\tUses ReGIR to sample lights\n*\t\tImplementation of [Rendering many lights with grid-based reservoirs, Boksansky, 2021]\n*/\n#define DirectLightSamplingBaseStrategy LSS_BASE_POWER\n\n/**\n * How many light samples to take and shade per each vertex of the\n * ray's path.\n * \n * Said otherwise, we're going to run next-event estimation that many\n * times per each intersection point along the ray.\n * \n * This is good because this amortizes camera rays and bounce rays i.e.\n * we get better shading quality for as many camera rays and bounce rays\n * \n * This is not supported by ReSTIR DI because this would require recomputing\n * a new reservoir = full re-run of ReSTIR = too expensive.\n * It does apply to the secondary bounces shading when using ReSTIR DI for the\n * primary bounce though.\n */ \n#define 
DirectLightSamplingNEESampleCount 1\n\n/**\n* If this is true, light sampling with NEE (emissive geometry & envmap) will not even\n* be attempted on perfectly smooth materials (smooth glass, smooth metals, ...)\n*\n* This is because these materials are delta distributions and light sampling\n* has no chance to give any contribution.\n*\n* There is no point in disabling that option, this is basically only for\n* performance comparisons\n*/\n#define DirectLightSamplingDeltaDistributionOptimization KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to allow backfacing lights during NEE evaluation.\n * \n * For most scenes, this is going to have no impact on visuals as lights are generally\n * watertight meshes, meaning that backfacing emissive triangles of those meshes are not visible from\n * the outside. There will thus be no visual difference but a non negligeable boost in \n * performance/sampling quality as backfacing lights will not be sampled anymore (depending on the sampling strategy)\n */\n#define DirectLightSamplingAllowBackfacingLights KERNEL_OPTION_FALSE\n\n#endif // #ifndef __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/GMoNOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_KERNEL_OPTIONS_GMON_OPTIONS_H\n#define HOST_DEVICE_COMMON_KERNEL_OPTIONS_GMON_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/Common.h\"\n\n/**\n * Kernel options for the implementation of GMoN\n * \n * Reference:\n * [1] [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\n */\n\n // This block is a security to make sure that we have everything defined otherwise this can lead\n // to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n /**\n  * Options are defined in a #ifndef __KERNELCC__ block because:\n  *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n  *\t\tcannot override a #define statement. This means that if the #define statement are encountered by the compiler,\n  *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n  * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n  */\n#ifndef __KERNELCC__\n\n/**\n * How many sets to use for GMoN. 
M variable in the paper\n */\n#define GMoNMSetsCount 11\n\n#endif // #ifndef __KERNELCC__\n\n// The options below are not in the \"#ifndef __KERNELCC__\" guard because they cannot change at runtime\n// so we're not passing them as options to the compiler with -D so they need to be know in the\n// source file at compile time\n/**\n * Thread block size dispatched when computing the G-Median of Means per each pixel\n */\n#define GMoNComputeMeansKernelThreadBlockSize 8\n\n /**\n  * How many bits to use to sort the means with a radix sort\n  */\n#define GMoNKeysNbDigitsForRadixSort 32\n\n/**\n * What radix is used for the radix sort of the means\n */\n#define GMoNSortRadixSize 2\n\n#endif // #ifndef HOST_DEVICE_COMMON_KERNEL_OPTIONS_GMON_OPTIONS_H\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/KernelOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_KERNEL_OPTIONS_H\n#define HOST_DEVICE_COMMON_KERNEL_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/DirectLightSamplingOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/GMoNOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/NEEPlusPlusOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/PrincipledBSDFKernelOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/ReGIROptions.h\"\n#include \"HostDeviceCommon/KernelOptions/ReSTIRDIOptions.h\"\n#include \"HostDeviceCommon/KernelOptions/ReSTIRGIOptions.h\"\n\n/**\n * This file references the path tracer options that can be passed to HIPCC using the -D <macro>=<value> option.\n * These path tracer options allow \"compile-time\" branching to enable/disable a variety\n * of functionalities in the path tracer.\n * \n * For example, you can decide, at kernel compile-time, what envmap sampling strategy to use \n *\t- \"CDF + Binary search\"\n *\t- \"Alias table\"\n * by passing the \"-D EnvmapSamplingStrategy=1\" or \"-D EnvmapSamplingStrategy=2\" option string during\n * the compilation of the kernel (for \"CDF\" and \"alias table\" respectively).\n * \n * If you wish to change one of the option used by the path tracer at runtime (by interacting with\n * ImGui for example), you will have to recompile the kernel with the correct set of options\n * passed to the kernel compiler.\n * \n * The advantage of recompiling the entire kernel over branching with a simple if() condition on\n * a variable (that would be passed in RenderData for example) is that the recompiling approach\n * does not incur an additional register cost that would harm the occupancy potential of the kernel\n * (whereas registers may be allocated for the block {} of the if() conditions since the compiler\n * has no way to know which branch of the if is going to be taken at 
runtime).\n */\n\n/**\n * Those are simple defines to give names to the option values.\n * This allows the use of LSS_ONE_RANDOM_LIGHT_MIS (for example) instead of a hardcoded '2'\n */\n#define BSDF_NONE 0\n#define BSDF_LAMBERTIAN 1\n#define BSDF_OREN_NAYAR 2\n#define BSDF_PRINCIPLED 3\n\n#define NESTED_DIELECTRICS_STACK_SIZE 4\n\n#define TRIANGLE_POINT_SAMPLING_TURK_1990 0\n#define TRIANGLE_POINT_SAMPLING_HEITZ_2019 1\n\n#define ESS_NO_SAMPLING 0\n#define ESS_BINARY_SEARCH 1\n#define ESS_ALIAS_TABLE 2\n\n#define PSS_BSDF 0\n#define PSS_RESTIR_GI 1\n\n/**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. This means that if the #define statement are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n/**\n * Whether or not to use shared memory and a global buffer for BVH traversal of global rays (no maximum distance).\n * \n * This improves performance at the cost of a higher VRAM usage (because of the global buffer needed)\n */\n#define UseSharedStackBVHTraversal KERNEL_OPTION_TRUE\n\n/**\n * Size of the thread blocks for all kernels dispatched by this renderer\n */\n#define KernelBlockWidthHeight 8\n\n/**\n * Size of the thread blocks used when dispatching the kernels. 
\n * This value is used for allocating the shared memory stack for traversal\n */\n#define KernelWorkgroupThreadCount (KernelBlockWidthHeight * KernelBlockWidthHeight)\n\n /**\n  * Size of the shared memory stack for BVH traversal of \"global\" rays \n  * (rays that search for the closest hit with no maximum distance)\n  */\n#define SharedStackBVHTraversalSize 16\n\n/**\n * Partial and (very) experimental implementation of [Generate Coherent Rays Directly, Liu et al., 2024]\n * for reuse sampled directions on the first hit accross the threads of warps\n */\n#define DoFirstBounceWarpDirectionReuse KERNEL_OPTION_FALSE\n\n/**\n * Allows the overriding of the BRDF/BSDF used by the path tracer. When an override is used,\n * the material retains its properties (color, roughness, ...) but only the parameters relevant\n * to the overriden BSDF are used.\n * \n *\t- BSDF_NONE\n *\t\tMaterials will use their default BRDF/BSDF, no override\n * \n *\t- BSDF_LAMBERTIAN\n *\t\tAll materials will use a lambertian BRDF\n * \n *\t- BSDF_OREN_NAYAR\n *\t\tAll materials will use the Oren Nayar diffuse BRDF\n * \n *\t- BSDF_PRINCIPLED\n *\t\tAll materials will use the Principled BSDF\n */\n#define BSDFOverride BSDF_NONE\n\n/**\n * The stack size for handling nested dielectrics\n */\n#define NestedDielectricsStackSize NESTED_DIELECTRICS_STACK_SIZE\n\n/**\n * How to randomly sample a point on a triangle\n * \n *\t- TRIANGLE_POINT_SAMPLING_TURK_1990\n *\t\tCommon way of warping from a square to a triangle using square roots:\n *\t\tV = (1.0f - sqrt(u1)) * V1 + sqrt(u1) * (s2 * V2 + (1.0f - s2) * V3)\n * \n *\t- TRIANGLE_POINT_SAMPLING_HEITZ_2019\n *\t\tImplementation of [A Low-Distortion Map Between Triangle and Square, Heitz, 2019]\n *\t\tIt is faster than Turk method's and better perserves the stratification of the random\n *\t\tnumber samplers\n */\n#define TrianglePointSamplingStrategy TRIANGLE_POINT_SAMPLING_HEITZ_2019\n\n/**\n * What envmap sampling strategy to use\n * \n * 
Possible values (the prefix ESS stands for \"Envmap Sampling Strategy\"):\n * \n *\t- ESS_NO_SAMPLING\n *\t\tNo importance sampling of the envmap\n * \n *\t- ESS_BINARY_SEARCH\n *\t\tImportance samples a texel of the environment map proportionally to its\n *\t\tluminance using a binary search on the CDF distributions of the envmap luminance.\n *\t\tGood convergence.\n * \n * - ESS_ALIAS_TABLE\n *\t\tImportance samples a texel of the environment map proportionally to its\n *\t\tluminance using an alias table for constant time sampling\n *\t\tGood convergence and faster than ESS_BINARY_SEARCH\n */\n#define EnvmapSamplingStrategy ESS_ALIAS_TABLE\n\n/**\n * Whether or not to do Muliple Importance Sampling between the envmap sample and a BSDF\n * sample when importance sampling direct lighting contribution from the envmap\n */\n#define EnvmapSamplingDoBSDFMIS KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to do bilinear filtering when sampling the envmap.\n * \n * This is mostly useful when the camera is looking straigth at the envmap and we don't \n * have camera ray jittering on: in this case, bilinear filtering will hide the \n * pixelated look of the envmap.\n */\n#define EnvmapSamplingDoBilinearFiltering KERNEL_OPTION_FALSE\n\n/**\n * What sampling strategy to use for sampling the bounces during path tracing.\n * \n *\t- PSS_BSDF\n *\t\tThe classical technique: importance samples the BSDF and bounces in that direction\n * \n *\t- PSS_RESTIR_GI\n *\t\tUses ReSTIR GI for resampling a path for the pixel.\n * \n *\t\tThe implementation is based on \n *\t\t[ReSTIR GI: Path Resampling for Real-Time Path Tracing] https://research.nvidia.com/publication/2021-06_restir-gi-path-resampling-real-time-path-tracing\n *\t\tbut is adapted almost full unbiasedness (full unbiasedness while resampling full path trees as in ReSTIR GI paper isn't supported\n *\t\tby the GRIS theory. 
Fully unbiased path resampling with the current RIS theory can only be achieved by resampling \"paths\" and not full \"path trees\" as proposed\n *\t\tin the ReSTIR GI paper and as implemented here)\n * \n *\t\tThe original ReSTIR GI paper indeed only is unbiased for a Lambertian BRDF\n */\n#define PathSamplingStrategy PSS_BSDF\n\n/**\n * Whether or not to use a visiblity term in the target function whose PDF we're\n * approximating with RIS.\n * Only applies for pure RIS direct lighting strategy (i.e. not RIS used by ReSTIR\n * on the initial candidates pass for example)\n * \n *\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n */\n#define RISUseVisiblityTargetFunction KERNEL_OPTION_FALSE\n\n/**\n * Debug option which, if enabled, only outputs the sample 'render_settings.output_debug_sample_N'\n * to the framebuffer.\n * \n * Useful for debugging features that may take effect after the first sample and we only want to see what\n * the second sample looks like without the accumulation\n */\n#define DisplayOnlySampleN KERNEL_OPTION_FALSE\n\n#endif // #ifndef __KERNELCC__\n\n/**\n * This is a handy macro that tells us whether or not we have any other kernel option \n * that overrides the color of the framebuffer\n */\n#define ViewportColorOverriden (\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\\\n\t(NEEPlusPlusDebugMode != NEE_PLUS_PLUS_DEBUG_MODE_NO_DEBUG || DirectLightNEEPlusPlusDisplayShadowRaysDiscarded == KERNEL_OPTION_TRUE) ||\t\\\n\t(DirectLightSamplingBaseStrategy == LSS_BASE_REGIR && ReGIR_DebugMode != REGIR_DEBUG_MODE_NO_DEBUG))\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/NEEPlusPlusOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_NEE_PLUS_PLUS_OPTIONS_H\n#define HOST_DEVICE_COMMON_NEE_PLUS_PLUS_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/Common.h\"\n\n// This block is a security to make sure that we have everything defined otherwise this can lead\n// to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n#define NEE_PLUS_PLUS_DEBUG_MODE_NO_DEBUG 0\n#define NEE_PLUS_PLUS_DEBUG_MODE_GRID_CELLS 1\n\n/**\n * The resolution downscale factor to apply for the ReGIR grid prepopulation.\n *\n * The lower the downscale, the more effective the prepoluation but also the more costly\n */\n#define NEEPlusPlus_GridPrepoluationResolutionDownscale 2\n\n/**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. 
This means that if the #define statement are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n/**\n* Whether or not to use NEE++ features at all\n*/\n#define DirectLightUseNEEPlusPlus KERNEL_OPTION_FALSE\n\n/**\n* Whether or not to use russian roulette to avoid tracing shadow rays based on the visibility\n* information of NEE++\n*/\n#define DirectLightUseNEEPlusPlusRR KERNEL_OPTION_FALSE\n\n/**\n* This a debug option to visualize shadow rays discarded by the NEE++ russian roulette\n*/\n#define DirectLightNEEPlusPlusDisplayShadowRaysDiscarded KERNEL_OPTION_FALSE\n\n/**\n* When using the 'DirectLightNEEPlusPlusDisplayShadowRaysDiscarded' kernel options\n* for displaying in the viewport where shadow rays were discarded, this parameter is used\n* to determine at what bounce in the scene we should display the shadow ray discarded or not\n*\n* 0 is the first hit\n*/\n#define DirectLightNEEPlusPlusDisplayShadowRaysDiscardedBounce 0\n\n/**\n * Maximum number of steps for the linear probing of the NEE++ hash grid\n */\n#define NEEPlusPlus_LinearProbingSteps 4\n\n/**\n * Debug mode for displaying some debug infos about NEE++\n */\n#define NEEPlusPlusDebugMode NEE_PLUS_PLUS_DEBUG_MODE_NO_DEBUG\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/PrincipledBSDFKernelOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_PRINCIPLED_BSDF_KERNEL_OPTIONS_H\n#define HOST_DEVICE_COMMON_PRINCIPLED_BSDF_KERNEL_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/Common.h\"\n\n/**\n * This file references the path tracer options that can be passed to HIPCC/NVCC using the -D <macro>=<value> option.\n * These path tracer options allow \"compile-time\" branching to enable/disable a variety\n * of functionalities in the path tracer.\n * \n * For example, you can decide, at kernel compile-time, what envmap sampling strategy to use \n *\t- \"CDF + Binary search\"\n *\t- \"Alias table\"\n * by passing the \"-D EnvmapSamplingStrategy=1\" or \"-D EnvmapSamplingStrategy=2\" option string during\n * the compilation of the kernel (for \"CDF\" and \"alias table\" respectively).\n * \n * If you wish to change one of the option used by the path tracer at runtime (by interacting with\n * ImGui for example), you will have to recompile the kernel with the correct set of options\n * passed to the kernel compiler.\n * \n * The advantage of recompiling the entire kernel over branching with a simple if() condition on\n * a variable (that would be passed in RenderData for example) is that the recompiling approach\n * does not incur an additional register cost that would harm the occupancy potential of the kernel\n * (whereas registers may be allocated for the block {} of the if() conditions since the compiler\n * has no way to know which branch of the if is going to be taken at runtime).\n */\n\n/**\n * Those are simple defines to give names to the option values.\n * This allows the use of LSS_ONE_RANDOM_LIGHT_MIS (for example) instead of a hardcoded '2'\n */\n#define PRINCIPLED_DIFFUSE_LOBE_LAMBERTIAN 0\n#define PRINCIPLED_DIFFUSE_LOBE_OREN_NAYAR 1\n\n#define GGX_VNDF_SAMPLING 0\n#define GGX_VNDF_SPHERICAL_CAPS 1\n#define GGX_VNDF_BOUNDED 
2\n\n // This block is a security to make sure that we have everything defined otherwise this can lead\n // to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n/**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. This means that if the #define statement are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n/**\n * What diffuse lobe to use in the principled BSDF.\n * \n *\t- PRINCIPLED_DIFFUSE_LOBE_LAMBERTIAN\n *\t\tUse a lambertian BRDF for the diffuse lobe\n * \n *\t- PRINCIPLED_DIFFUSE_LOBE_OREN_NAYAR\n *\t\tUse an Oren-Nayar BRDF for the diffuse lobe\n */\n#define PrincipledBSDFDiffuseLobe PRINCIPLED_DIFFUSE_LOBE_LAMBERTIAN\n\n/**\n * What sampling strategy to use for the GGX NDF\n *\n *  - GGX_NO_VNDF [Not Yet Implemented]\n *\t\tNot sampling the visible distribution of normals.\n *\t\tJust classic GGX sampling\n *\n *  - GGX_VNDF_SAMPLING\n *\t\tSample the distribution of visible normals as proposed\n *\t\tin [Sampling the GGX Distribution of Visible Normals, Heitz, 2018]\n *\n *  - GGX_VNDF_SPHERICAL_CAPS\n *\t\tSample the distribution of visible normals using spherical\n *\t\tcaps as proposed in [Sampling Visible GGX Normals with Spherical Caps, Dupuy & Benyoub, 2023]\n *\n *  - GGX_VNDF_BOUNDED [Not Yet Implemented]\n *\t\tSample the distribution of visible 
normals with a bounded VNDF\n *\t\tsampling range as proposed in [Bounded VNDF Sampling for Smith-GGX Reflections, Eto & Tokuyoshi, 2023]\n *\n */\n#define PrincipledBSDFAnisotropicGGXSampleFunction GGX_VNDF_SAMPLING\n\n/**\n * Whether or not to use multiple scattering to conserve energy when evaluating\n * GGX BRDF lobes in the Principled BSDF\n * \n * This is implemented by following \n * [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n * \n * Possible options are KERNEL_OPTION_TRUE and KERNEL_OPTION_FALSE. Self explanatory.\n */\n#define PrincipledBSDFDoEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to perform energy compensation for the glass layer of the Principled BSDF\n */\n#define PrincipledBSDFDoGlassEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to perform energy compensation (it's an approximation for the clearcoat\n * layer, it's not perfect but very good in most cases) for the clearcoat layer of the Principled BSDF\n */\n#define PrincipledBSDFDoClearcoatEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to perform energy compensation for the metallic layer of the Principled BSDF\n */\n#define PrincipledBSDFDoMetallicEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to use multiple scattering to conserve energy and use a\n * Fresnel compensation term i.e. account for Fresnel when light scatters multiple\n * times on the microsurface. This increases saturation and has a noticeable impact.\n * Only applies to conductors. This term always is implicitely used for dielectrics\n *\n * This is implemented by following\n * [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n *\n * Possible options are KERNEL_OPTION_TRUE and KERNEL_OPTION_FALSE. 
Self explanatory.\n */\n#define PrincipledBSDFDoMetallicFresnelEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to perform energy compensation for the specular/diffuse layer of the Principled BSDF\n */\n#define PrincipledBSDFDoSpecularEnergyCompensation KERNEL_OPTION_TRUE\n\n/**\n * If this is true, then delta distribution lobes of the principled BSDF will not be evaluated\n * if the incident light direction used for the evaluation doesn't come from sampling the \n * delta distribution lobe itself\n * \n * Some more details in BSDFIncidentLightInfo.h\n */\n#define PrincipledBSDFDeltaDistributionEvaluationOptimization KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to sample the glossy/diffuse base layer of the BSDF based on the fresnel or not.\n * \n * This means that the diffuse layer will be sampled more often at normal incidence since this is where\n * the specular layer reflects close to no light.\n * \n * At grazing angle however, where the specular layer reflects the most light (and so the diffuse layer \n * below isn't reached by that light that is reflected by the specular layer), it is the specular layer\n * that will be sampled more often.\n */\n#define PrincipledBSDFSampleGlossyBasedOnFresnel KERNEL_OPTION_FALSE\n\n/**\n * Same PrincipledBSDFSampleGlossyBasedOnFresnel but for the coat layer\n */\n#define PrincipledBSDFSampleCoatBasedOnFresnel KERNEL_OPTION_FALSE\n\n/**\n * Implementation of [Microfacet Model Regularization for Robust Light Transport, Jendersie et al. 
2019]\n * for regularizing (roughening) microfacet materials and help with caustics rendering\n */\n#define PrincipledBSDFDoMicrofacetRegularization KERNEL_OPTION_TRUE\n\n/**\n * For microfacet model regularization, whether or not the use the consistent parametertization for tau_0 as\n * given by equation 16 of the paper\n */\n#define PrincipledBSDFDoMicrofacetRegularizationConsistentParameterization KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to take the path's roughness into account when regularizing the BSDFs\n */\n#define PrincipledBSDFMicrofacetRegularizationDiffusionHeuristic KERNEL_OPTION_TRUE\n\n#endif // #ifndef __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/ReGIROptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_REGIR_OPTIONS_H\n#define HOST_DEVICE_COMMON_REGIR_OPTIONS_H\n\n#include \"HostDeviceCommon/KernelOptions/DirectLightSamplingOptions.h\"\n\n#define REGIR_DEBUG_MODE_NO_DEBUG 0\n#define REGIR_DEBUG_MODE_GRID_CELLS 1\n#define REGIR_DEBUG_MODE_AVERAGE_CELL_NON_CANONICAL_RESERVOIR_CONTRIBUTION 2\n#define REGIR_DEBUG_MODE_AVERAGE_CELL_CANONICAL_RESERVOIR_CONTRIBUTION 3\n#define REGIR_DEBUG_MODE_REPRESENTATIVE_POINTS 4\n#define REGIR_DEBUG_MODE_REPRESENTATIVE_NORMALS 5\n#define REGIR_DEBUG_MODE_SAMPLING_FALLBACK 6\n\n#define REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE_LINEAR_PROBING 0\n#define REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE_REHASHING 1\n\n // This block is a security to make sure that we have everything defined otherwise this can lead\n // to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n/**\n * The resolution downscale factor to apply for the ReGIR grid prepopulation.\n * \n * The lower the downscale, the more effective the prepopulation but also the more costly\n */\n#define ReGIR_GridPrepopulationResolutionDownscale 1\n\n/**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, they would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. 
This means that if the #define statements are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n/**\n* How to sample lights in the scene for filling the ReGIR grid.\n*\n*\t- LSS_BASE_UNIFORM\n*\t\tLights are sampled uniformly\n*\n*\t- LSS_BASE_POWER\n*\t\tLights are sampled proportionally to their power\n*/\n#define ReGIR_GridFillLightSamplingBaseStrategy LSS_BASE_POWER\n\n/**\n * Whether or not to use a visibility term in the target function used to resample the reservoirs of the grid cells.\n * \n * Probably too expensive to be efficient.\n */\n#define ReGIR_GridFillTargetFunctionVisibility KERNEL_OPTION_FALSE\n\n/**\n * Whether or not to use the cosine term between the direction to the light sample and the\n * representative normal of the grid cell in the target function used to resample the reservoirs of the grid cells.\n *\n * This has no effect if representative points are not being used\n */\n#define ReGIR_GridFillTargetFunctionCosineTerm KERNEL_OPTION_TRUE\n\n/**\n * Takes the cosine term at the light source (i.e. 
the cosine term of the geometry term) into account when\n * evaluating the target function during grid fill\n */\n#define ReGIR_GridFillTargetFunctionCosineTermLightSource KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to include the BSDF in the target function used for the resampling of the initial candidates\n * for the grid fill.\n * \n * Helps a lot on glossy surfaces.\n * \n * This option applies to primary hits only and should generally be set to true for better sampling.\n */\n#define ReGIR_GridFillPrimaryHitsTargetFunctionBSDF KERNEL_OPTION_TRUE\n\n/**\n * Same as 'ReGIR_GridFillPrimaryHitsTargetFunctionBSDF' but only applies to secondary hits\n * \n * This option should be set to false in general as we cannot guess in advance what the view direction is going\n * to be at secondary hits (since they can come from anywhere when the rays bounce around the scene) and thus we\n * cannot properly evaluate the BRDF for sampling lights.\n */\n#define ReGIR_GridFillSecondaryHitsTargetFunctionBSDF KERNEL_OPTION_FALSE\n\n/**\n * Whether or not to estimate the visibility probability of samples with NEE++ during the grid fill.\n */\n#define ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation KERNEL_OPTION_TRUE\n\n/**\n * This option must be set to true and a grid fill + spatial reuse kernels compiled with this option set\n * to true for those passes to accumulate the RIS integral of the reservoirs (for use in MIS)\n */\n#define ReGIR_GridFillSpatialReuse_AccumulatePreIntegration KERNEL_OPTION_FALSE\n\n/**\n * Whether or not to enable light presampling to improve grid fill performance\n * on scenes with many many lights\n */\n#define ReGIR_GridFillDoLightPresampling KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to use a shadow ray in the target function when shading a point at path tracing time.\n * This reduces visibility noise\n */\n#define ReGIR_ShadingResamplingTargetFunctionVisibility KERNEL_OPTION_FALSE\n\n/**\n * Whether or not to use NEE++ to estimate the 
visibility probability of the reservoir being resampled during\n * shading such that reservoirs that are likely to be occluded will have a lower resampling probability\n * \n * This option is exclusive with ReGIR_ShadingResamplingTargetFunctionVisibility, the latter taking precedence.\n */\n#define ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to jitter canonical candidates during the shading resampling.\n * This reduces grid artifacts but increases variance\n */\n#define ReGIR_ShadingResamplingJitterCanonicalCandidates KERNEL_OPTION_FALSE\n\n/**\n * Whether or not to incorporate BSDF samples with MIS during shading resampling.\n */\n#define ReGIR_ShadingResamplingDoBSDFMIS KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to use Pairwise MIS weights for weighting the different samples at shading-resampling time.\n * \n * If this is false, 1/Z MIS weights will be used instead which are potentially faster but definitely have more variance.\n */\n#define ReGIR_ShadingResamplingDoMISPairwiseMIS KERNEL_OPTION_TRUE\n\n/**\n * If true, all samples resampled will be shaded instead of shading only the reservoir result of the resampling.\n * \n * This massively improves quality at the cost of performance and is very likely to be worth it for scenes that are not\n * too hard to trace (where shadow rays are expensive).\n */\n#define ReGIR_ShadingResamplingShadeAllSamples KERNEL_OPTION_TRUE\n\n/**\n * Light sampling technique used in case the position that we are shading is falling outside of the ReGIR grid\n * \n * All LSS_BASE_XXX strategies are allowed except LSS_BASE_REGIR\n */\n#define ReGIR_FallbackLightSamplingStrategy LSS_BASE_POWER\n\n/**\n * Whether or not to increase the hash grid precision on surfaces that have a lower roughness\n * such that the BRDF term in the target function of the grid fill (if used at all) has a higher\n * precision and gives better results\n */\n#define 
ReGIR_HashGridAdaptiveRoughnessGridPrecision KERNEL_OPTION_TRUE\n\n/**\n * Whether or not to use constant grid cell size for the hash grid.\n * \n * If this is false, the grid cell size will increase (cells gets bigger) the further away\n * from the camera. This can help with performance and the number of resident cells\n * in the hash grid but it tends to hurt quality because of the reduced grid cell resolution\n */\n#define ReGIR_HashGridConstantGridCellSize KERNEL_OPTION_FALSE\n\n/**\n *  How to resolve a collision found in the hash grid:\n * \n * - REGIR_HASH_GRID_COLLISION_RESOLUTION_LINEAR_PROBING: If a collision is found, look up the next index in the hash\n *      table and see if that location is empty. If not empty, continue looking at the next location\n *      up to 'ReGIR_HashGridCollisionResolutionMaxSteps' times\n * \n * - REGIR_HASH_GRID_COLLISION_RESOLUTION_REHASHING: If a collision is found, hash the current cell index to get the\n *      new candidate location. Continue doing so until an empty location is found or 'ReGIR_HashGridCollisionResolutionMaxSteps'\n *      steps is exceeded\n */\n#define ReGIR_HashGridCollisionResolutionMode REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE_LINEAR_PROBING\n\n/**\n * Maximum number of steps for the linear probing in the hash table to resolve collisions\n */\n#define ReGIR_HashGridCollisionResolutionMaxSteps 32\n\n/**\n * Whether or not to use the surface normal in the hash function of the hash grid\n */\n#define ReGIR_HashGridHashSurfaceNormal KERNEL_OPTION_TRUE\n\n\n/** \n * The number of discretization steps used to hash the surface normal\n * The higher the number, the better the hash grid resolution but the higher the\n * memory cost of the grid and the computational cost of the grid fill\n */\n#define ReGIR_HashGridHashSurfaceNormalResolutionPrimaryHits 4\n\n/**\n * Same as above but for the secondary hits only. 
A lower setting here is usually enough and saves\n * on perf and VRAM\n */\n#define ReGIR_HashGridHashSurfaceNormalResolutionSecondaryHits 2\n\n/**\n * If using jittering, how many tries to perform to find a good neighbor at shading time?\n *\n * This is because with jittering, our jittered position may end up outside of the grid\n * or in an empty cell, in which case we want to retry with a differently jittered position\n * to try and find a good neighbor\n */\n#define ReGIR_ShadingJitterTries 2\n\n/**\n * Debug option to color the scene with the grid cells\n */\n#define ReGIR_DebugMode REGIR_DEBUG_MODE_NO_DEBUG\n\n#endif // #ifndef __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/ReSTIRDIOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RESTIR_DI_OPTIONS_H\n#define HOST_DEVICE_COMMON_RESTIR_DI_OPTIONS_H\n\n#define RESTIR_DI_BIAS_CORRECTION_1_OVER_M 0\n#define RESTIR_DI_BIAS_CORRECTION_1_OVER_Z 1\n#define RESTIR_DI_BIAS_CORRECTION_MIS_LIKE 2\n#define RESTIR_DI_BIAS_CORRECTION_MIS_GBH 3\n#define RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS 4\n#define RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE 5\n#define RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO 6\n#define RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO 7\n\n#define RESTIR_DI_LATER_BOUNCES_UNIFORM_ONE_LIGHT 0\n#define RESTIR_DI_LATER_BOUNCES_BSDF 1\n#define RESTIR_DI_LATER_BOUNCES_MIS_LIGHT_BSDF 2\n#define RESTIR_DI_LATER_BOUNCES_RIS_BSDF_AND_LIGHT 3\n\n#define RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT 64 // CHANGE THIS ONE TO MODIFY THE NUMBER OF BITS.\n\n // This block is a security to make sure that we have everything defined otherwise this can lead\n // to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n/**\n* Options are defined in a #ifndef __KERNELCC__ block because:\n*\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n*\t\tcannot override a #define statement. 
This means that if the #define statement are encountered by the compiler,\n*\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n* - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n*/\n#ifndef __KERNELCC__\n\n/**\n* Whether or not to use a visibility term in the target function when resampling\n* initial candidates in ReSTIR DI. *\n*\n*\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n*/\n#define ReSTIR_DI_InitialTargetFunctionVisibility KERNEL_OPTION_FALSE\n\n/**\n* Whether or not to use a visibility term in the target function when resampling\n* samples in ReSTIR DI. This applies to the spatial reuse pass only.\n* This option can have a good impact on quality and be worth it in terms of cost.\n*\n*\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n*/\n#define ReSTIR_DI_SpatialTargetFunctionVisibility KERNEL_OPTION_FALSE\n\n/**\n* Whether or not to do a visibility check at the end of the initial candidates sampling.\n* This discards reservoirs (by setting their UCW to 0.0f) whose samples are occluded.\n* This allows following ReSTIR passes (temporal and spatial) to only resample on samples\n* that are not occluded which improves quality quite a bit.\n*\n*\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n*/\n#define ReSTIR_DI_DoVisibilityReuse KERNEL_OPTION_TRUE\n\n/**\n* Whether or not to use a visibility term in the MIS weights (MIS-like weights,\n* generalized balance heuristic, pairwise MIS, ...) used to remove bias when\n* resampling neighbors. An additional visibility ray will be traced for MIS-weight\n* evaluated. This effectively means for each neighbor resamples or (for each neighbor resampled)^2\n* if using the generalized balance heuristics (without pairwise-MIS)\n*\n* To guarantee unbiasedness, this needs to be true. 
A small amount of energy loss\n* may be observed if this value is KERNEL_OPTION_FALSE but the performance cost of the spatial\n* reuse will be reduced noticeably\n*\n*\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n*/\n#define ReSTIR_DI_BiasCorrectionUseVisibility KERNEL_OPTION_TRUE\n\n/**\n* What bias correction weights to use when resampling neighbors (temporal / spatial)\n*\n*  - RESTIR_DI_BIAS_CORRECTION_1_OVER_M\n*\t\tVery simple biased weights as described in the 2020 paper (Eq. 6).\n*\t\tThose weights are biased because they do not account for cases where\n*\t\twe resample a sample that couldn't have been produced by some neighbors.\n*\t\tThe bias shows up as darkening, mostly at object boundaries. In GRIS vocabulary,\n*\t\tthis type of weights can be seen as confidence weights alone c_i / sum(c_j)\n*\n*  - RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n*\t\tSimple unbiased weights as described in the 2020 paper (Eq. 16 and Section 4.3)\n*\t\tThose weights are unbiased but can have **extremely** bad variance when a neighbor being resampled\n*\t\thas a very low target function (when the neighbor is a glossy surface for example).\n*\t\tSee Fig. 7 of the 2020 paper.\n*\n*  - RESTIR_DI_BIAS_CORRECTION_MIS_LIKE\n*\t\tUnbiased weights as proposed by Eq. 22 of the paper. Way better than 1/Z in terms of variance\n*\t\tand still unbiased.\n*\n*  - RESTIR_DI_BIAS_CORRECTION_MIS_GBH\n*\t\tUnbiased MIS weights that use the generalized balance heuristic. Very good variance reduction but O(N^2) complexity,\n\tN being the number of neighbors resampled.\n*\t\tEq. 
36 of the 2022 Generalized Resampled Importance Sampling paper.\n*\n*\t- RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS (and the defensive version RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE)\n*\t\tSimilar variance reduction to the generalized balance heuristic and only O(N) computational cost.\n*\t\tSection 7.1.3 of \"A Gentle Introduction to ReSTIR\", 2023\n* \n*\t- RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO (and the defensive version RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO)\n*\t\tA bit more variance than pairwise MIS but way more robust to temporal correlations\n* \n*\t\tImplementation of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\n*/\n#define ReSTIR_DI_BiasCorrectionWeights RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS\n\n/**\n* What direct lighting sampling strategy to use for secondary bounces when ReSTIR DI is used for sampling the first bounce\n*\n* Possible values (the prefix LSS stands for \"Light Sampling strategy\"):\n*\n*\t- RESTIR_DI_LATER_BOUNCES_UNIFORM_ONE_LIGHT\n*\t\tSamples one random light in the scene without MIS\n*\n*\t- RESTIR_DI_LATER_BOUNCES_MIS_LIGHT_BSDF\n*\t\tSamples one random light in the scene with MIS (Multiple Importance Sampling): light sample + BRDF sample\n*\n*  - RESTIR_DI_LATER_BOUNCES_BSDF\n*\t\tSamples a light using a BSDF sample.\n*\t\tEfficient as long as the light sources in the scene are large.\n*\n*\t- RESTIR_DI_LATER_BOUNCES_RIS_BSDF_AND_LIGHT\n*\t\tSamples lights in the scene with Resampled Importance Sampling\n*/\n#define ReSTIR_DI_LaterBouncesSamplingStrategy RESTIR_DI_LATER_BOUNCES_RIS_BSDF_AND_LIGHT\n\n/**\n* If true, lights are presampled in a pre-process pass as described in\n* [Rearchitecting Spatiotemporal Resampling for Production, Wyman, Panteleev, 2021]\n* https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production.\n*\n* This improves performance in scenes with dozens of thousands / millions of\n* lights by avoiding cache trashing because 
of the memory random walk that\n* light sampling becomes with that many lights\n*/\n#define ReSTIR_DI_DoLightPresampling KERNEL_OPTION_FALSE\n\n/**\n * What light sampling strategy to use to presample lights\n * \n * \t- LSS_BASE_UNIFORM\n *\t\tLights are sampled uniformly\n *\n *\t- LSS_BASE_POWER\n *\t\tLights are sampled proportionally to their power\n */\n#define ReSTIR_DI_LightPresamplingStrategy LSS_BASE_POWER\n\n/**\n * How many bits to use for the directional reuse masks\n *\n * More bits use more VRAM but increase the precision of the directional reuse\n */\n#define ReSTIR_DI_SpatialDirectionalReuseBitCount (RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT > 64 ? 64 : RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT)\n\n/**\n * Technique presented in [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\n * \n * Helps with the pepper noise introduced by not using visibility in the spatial resampling target function\n */\n#define ReSTIR_DI_DoOptimalVisibilitySampling KERNEL_OPTION_FALSE\n\n#endif // #ifndef __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/KernelOptions/ReSTIRGIOptions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RESTIR_GI_OPTIONS_H\n#define HOST_DEVICE_COMMON_RESTIR_GI_OPTIONS_H\n\n#define RESTIR_GI_BIAS_CORRECTION_1_OVER_M 0\n#define RESTIR_GI_BIAS_CORRECTION_1_OVER_Z 1\n#define RESTIR_GI_BIAS_CORRECTION_MIS_LIKE 2\n#define RESTIR_GI_BIAS_CORRECTION_MIS_GBH 3\n#define RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS 4\n#define RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE 5\n#define RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO 6\n#define RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO 7\n\n#define RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT 64 // CHANGE THIS ONE TO MODIFY THE NUMBER OF BITS.\n\n // This block is a security to make sure that we have everything defined otherwise this can lead\n // to weird behavior because of the compiler not knowing about some macros\n#ifndef KERNEL_OPTION_TRUE\n#error \"KERNEL_OPTION_TRUE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#else\n#ifndef KERNEL_OPTION_FALSE\n#error \"KERNEL_OPTION_FALSE not defined, include 'HostDeviceCommon/KernelOptions/Common.h'\"\n#endif\n#endif\n\n /**\n * Options are defined in a #ifndef __KERNELCC__ block because:\n *\t- If they were not, the would be defined on the GPU side. However, the -D <macro>=<value> compiler option\n *\t\tcannot override a #define statement. This means that if the #define statement are encountered by the compiler,\n *\t\twe cannot modify the value of the macros anymore with the -D option which means no run-time switching / experimenting :(\n * - The CPU still needs the options to be able to compile the code so here they are, in a CPU-only block\n */\n#ifndef __KERNELCC__\n\n /**\n * Whether or not to use a visibility term in the target function when resampling\n * samples in ReSTIR GI. This applies to the spatial reuse pass only.\n *\n *\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. 
Self-explanatory\n */\n#define ReSTIR_GI_SpatialTargetFunctionVisibility KERNEL_OPTION_FALSE\n\n/**\n* Whether or not to use a visibility term in the MIS weights (MIS-like weights,\n* generalized balance heuristic, pairwise MIS, ...) used to remove bias when\n* resampling neighbors. An additional visibility ray will be traced for MIS-weight\n* evaluated. This effectively means for each neighbor resampled or (for each neighbor resampled)^2\n* if using the generalized balance heuristics (without pairwise-MIS)\n*\n* To guarantee unbiasedness, this needs to be true. A small amount of energy loss\n* may be observed if this value is KERNEL_OPTION_FALSE but the performance cost of the spatial\n* reuse will be reduced noticeably\n*\n*\t- KERNEL_OPTION_TRUE or KERNEL_OPTION_FALSE values are accepted. Self-explanatory\n*/\n#define ReSTIR_GI_BiasCorrectionUseVisibility KERNEL_OPTION_TRUE\n\n/**\n* What bias correction weights to use when resampling neighbors (temporal / spatial)\n*\n*  - RESTIR_GI_BIAS_CORRECTION_1_OVER_M\n*\t\tVery simple biased weights as described in the 2020 paper (Eq. 6).\n*\t\tThose weights are biased because they do not account for cases where\n*\t\twe resample a sample that couldn't have been produced by some neighbors.\n*\t\tThe bias shows up as darkening, mostly at object boundaries. In GRIS vocabulary,\n*\t\tthis type of weights can be seen as confidence weights alone c_i / sum(c_j)\n*\n*  - RESTIR_GI_BIAS_CORRECTION_1_OVER_Z\n*\t\tSimple unbiased weights as described in the 2020 paper (Eq. 16 and Section 4.3)\n*\t\tThose weights are unbiased but can have **extremely** bad variance when a neighbor being resampled\n*\t\thas a very low target function (when the neighbor is a glossy surface for example).\n*\t\tSee Fig. 7 of the 2020 paper.\n*\n*  - RESTIR_GI_BIAS_CORRECTION_MIS_LIKE\n*\t\tUnbiased weights as proposed by Eq. 22 of the paper. 
Way better than 1/Z in terms of variance\n*\t\tand still unbiased.\n*\n*  - RESTIR_GI_BIAS_CORRECTION_MIS_GBH\n*\t\tUnbiased MIS weights that use the generalized balance heuristic. Very good variance reduction but O(N^2) complexity,\n\tN being the number of neighbors resampled.\n*\t\tEq. 36 of the 2022 Generalized Resampled Importance Sampling paper.\n*\n*\t- RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS (and the defensive version RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE)\n*\t\tSimilar variance reduction to the generalized balance heuristic and only O(N) computational cost.\n*\t\tSection 7.1.3 of \"A Gentle Introduction to ReSTIR\", 2023\n* \n* *\t- RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO (and the defensive version RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO)\n*\t\tA bit more variance than pairwise MIS but way more robust to temporal correlations\n* \n*\t\tImplementation of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\n*/\n#define ReSTIR_GI_BiasCorrectionWeights RESTIR_GI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE\n\n/**\n * How many bits to use for the directional reuse masks\n * \n * More bits use more VRAM but increase the precision of the directional reuse\n */\n#define ReSTIR_GI_SpatialDirectionalReuseBitCount (RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT > 64 ? 64 : RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_BIT_COUNT)\n\n /**\n  * Technique presented in [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\n  *\n  * Helps with the pepper noise introduced by not using visibility in the spatial resampling target function\n  */\n#define ReSTIR_GI_DoOptimalVisibilitySampling KERNEL_OPTION_FALSE\n\n#endif // #ifndef __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/LightSampleInformation.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_LIGHT_SAMPLE_INFORMATION_H\n#define HOST_DEVICE_COMMON_LIGHT_SAMPLE_INFORMATION_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n#include \"Device/includes/LightSampling/PDFConversion.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n\nstruct LightSampleInformation\n{\n    // Index of the triangle in the whole scene (not just in the emissive triangles buffer)\n    int emissive_triangle_index = -1;\n\n    float3 light_source_normal = { 0.0f, 1.0f, 0.0f };\n    float light_area = 1.0f;\n    ColorRGB32F emission;\n\n    float3 point_on_light = make_float3(0.0f, 0.0f, 0.0f);\n\n    float area_measure_pdf = 0.0f;\n\n    // The light sample may come from BSDF sampling (with ReGIR mostly) and so we may have\n\t// information about the lobe that was sampled.\n\tBSDFIncidentLightInfo incident_light_info = BSDFIncidentLightInfo::NO_INFO;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialCPU.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_CPU_H\n#define HOST_DEVICE_COMMON_MATERIAL_CPU_H\n\n#include \"Device/includes/NestedDielectrics.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n#include \"HostDeviceCommon/Material/MaterialUtils.h\"\n\n // Adding this guard to make sure that we never use the CPU materials in the GPU code\n#ifndef  __KERNELCC__\n\n// This material structure is only used on the CPU.\n// The reason why we have a CPU and a GPU material is because\n// we may want to precompute some properties on the CPU before sending them\n// to the GPU. This means that the CPU material stores both the precomputed values\n// and the values needed for the precomputation itself.\n//\n// But the GPU only cares about the precomputed values itself, not the ingredients\n// to the precomputation so that's why we have separate structures\nstruct CPUMaterial\n{\n    /**\n     * Function that transforms/packs this material to the version that the GPU is going to use\n     */\n    DevicePackedTexturedMaterial pack_to_GPU() const\n    {\n        DevicePackedTexturedMaterial mat;\n\n        mat.set_normal_map_texture_index(this->normal_map_texture_index);\n        mat.set_emission_texture_index(this->emission_texture_index);\n        mat.set_base_color_texture_index(this->base_color_texture_index);\n\n        mat.set_roughness_metallic_texture_index(this->roughness_metallic_texture_index);\n        mat.set_roughness_texture_index(this->roughness_texture_index);\n        mat.set_metallic_texture_index(this->metallic_texture_index);\n        mat.set_anisotropic_texture_index(this->anisotropic_texture_index);\n\n        mat.set_specular_texture_index(this->specular_texture_index);\n        mat.set_coat_texture_index(this->coat_texture_index);\n        
mat.set_sheen_texture_index(this->sheen_texture_index);\n        mat.set_specular_transmission_texture_index(this->specular_transmission_texture_index);\n\n\n\n\n\n\n        mat.set_emission(emission * emission_strength * global_emissive_factor);\n        mat.set_emissive_texture_used(emissive_texture_used);\n\n        mat.set_base_color(base_color);\n\n        mat.set_roughness(roughness);\n        mat.set_oren_nayar_sigma(oren_nayar_sigma);\n\n        // Parameters for Adobe 2023 F82-tint model\n        mat.set_metallic(metallic);\n        mat.set_metallic_F90_falloff_exponent(metallic_F90_falloff_exponent);\n        // F0 is not here as it uses the 'base_color' of the material\n        mat.set_metallic_F82(metallic_F82);\n        mat.set_metallic_F90(metallic_F90);\n        mat.set_anisotropy(anisotropy);\n        mat.set_anisotropy_rotation(anisotropy_rotation);\n        mat.set_second_roughness_weight(second_roughness_weight);\n        mat.set_second_roughness(second_roughness);\n        mat.set_metallic_energy_compensation(do_metallic_energy_compensation);\n\n        // Specular intensity\n        mat.set_specular(specular);\n        // Specular tint intensity. 
\n        // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n        mat.set_specular_tint(specular_tint);\n        mat.set_specular_color(specular_color);\n        // Same as coat darkening but for total internal reflection inside the specular layer\n        // that sits on top of the diffuse base\n        //\n        // Disabled by default for artistic \"expectations\"\n        mat.set_specular_darkening(specular_darkening);\n        mat.set_specular_energy_compensation(do_specular_energy_compensation);\n\n        mat.set_coat(coat);\n        mat.set_coat_medium_absorption(coat_medium_absorption);\n        // The coat thickness influences the amount of absorption (given by 'coat_medium_absorption')\n        // that will happen inside the coat\n        mat.set_coat_medium_thickness(coat_medium_thickness);\n        mat.set_coat_roughness(coat_roughness);\n        // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n        // i.e. 
the specular/metallic/transmission layers.\n        // \n        // The option is however given here to artistically disable\n        // that behavior by using coat roughening = 0.0f.\n        mat.set_coat_roughening(coat_roughening);\n        // Because of the total internal reflection that can happen inside the coat layer (i.e.\n        // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n        // clearcoat will appear with increased saturation.\n        mat.set_coat_darkening(coat_darkening);\n        mat.set_coat_anisotropy(coat_anisotropy);\n        mat.set_coat_anisotropy_rotation(coat_anisotropy_rotation);\n        mat.set_coat_ior(coat_ior);\n        mat.set_coat_energy_compensation(do_coat_energy_compensation);\n\n        mat.set_sheen(sheen); // Sheen strength\n        mat.set_sheen_roughness(sheen_roughness);\n        mat.set_sheen_color(sheen_color);\n\n        mat.set_ior(ior);\n        mat.set_specular_transmission(specular_transmission);\n        mat.set_diffuse_transmission(diffuse_transmission);\n\n        // At what distance is the light absorbed to the given absorption_color\n        mat.set_absorption_at_distance(absorption_at_distance);\n        // Color of the light absorption when traveling through the medium\n        mat.set_absorption_color(absorption_color);\n        mat.set_dispersion_scale(dispersion_scale);\n        mat.set_dispersion_abbe_number(dispersion_abbe_number);\n        mat.set_thin_walled(thin_walled);\n        mat.set_glass_energy_compensation(do_glass_energy_compensation);\n\n        mat.set_thin_film(thin_film);\n        mat.set_thin_film_ior(thin_film_ior);\n        mat.set_thin_film_thickness(thin_film_thickness);\n        mat.set_thin_film_kappa_3(thin_film_kappa_3);\n        // Sending the hue film in [0, 1] to the GPU\n        mat.set_thin_film_hue_shift_degrees(thin_film_hue_shift_degrees / 360.0f);\n        mat.set_thin_film_base_ior_override(thin_film_base_ior_override);\n        
mat.set_thin_film_do_ior_override(thin_film_do_ior_override);\n\n        // 1.0f makes the material completely opaque\n        // 0.0f completely transparent (becomes invisible)\n        mat.set_alpha_opacity(alpha_opacity);\n\n        // Nested dielectric parameter\n        mat.set_dielectric_priority(dielectric_priority);\n\n        return mat;\n    }\n\n    HIPRT_HOST_DEVICE bool is_emissive() const\n    {\n        return !hippt::is_zero(emission.r)\n            || !hippt::is_zero(emission.g)\n            || !hippt::is_zero(emission.b)\n            || emissive_texture_used;\n    }\n\n    /*\n     * Clamps some of the parameters of the material to avoid edge cases like NaNs\n     * during rendering (i.e. numerical instabilities)\n     */\n    HIPRT_HOST_DEVICE void make_safe()\n    {\n        // The values are going to be packed before being sent to the GPU\n        // Packing limits the range of values (most of them in [0, 1] because\n        // they are not expected to go higher) so we're clamping the values\n        // to avoid out-of-range-packing \n        base_color.clamp(0.0f, 1.0f);\n\n        metallic = hippt::clamp(0.0f, 1.0f, metallic);\n        metallic_F82.clamp(0.0f, 1.0f);\n        metallic_F90.clamp(0.0f, 1.0f);\n        anisotropy = hippt::clamp(0.0f, 1.0f, anisotropy);\n        anisotropy_rotation = hippt::clamp(0.0f, 1.0f, anisotropy_rotation);\n        second_roughness_weight  = hippt::clamp(0.0f, 1.0f, second_roughness_weight);\n        second_roughness = hippt::clamp(0.0f, 1.0f, second_roughness);\n\n        specular = hippt::clamp(0.0f, 1.0f, specular);\n        specular_tint = hippt::clamp(0.0f, 1.0f, specular_tint);\n        specular_color.clamp(0.0f, 1.0f);\n        specular_darkening = hippt::clamp(0.0f, 1.0f, specular_darkening);\n\n        coat = hippt::clamp(0.0f, 1.0f, coat);\n        coat_medium_absorption.clamp(0.0f, 1.0f);\n        coat_roughness = hippt::clamp(MaterialConstants::ROUGHNESS_CLAMP, 1.0f, coat_roughness);\n        
coat_roughening = hippt::clamp(0.0f, 1.0f, coat_roughening);\n        coat_darkening = hippt::clamp(0.0f, 1.0f, coat_darkening);\n        coat_anisotropy = hippt::clamp(0.0f, 1.0f, coat_anisotropy);\n        coat_anisotropy_rotation = hippt::clamp(0.0f, 1.0f, coat_anisotropy_rotation);\n        \n        sheen = hippt::clamp(0.0f, 1.0f, sheen);\n        sheen_roughness = hippt::clamp(MaterialConstants::ROUGHNESS_CLAMP, 1.0f, sheen_roughness);\n        sheen_color.clamp(0.0f, 1.0f);\n\n        specular_transmission = hippt::clamp(0.0f, 1.0f, specular_transmission);\n        // Avoiding zero\n        absorption_at_distance = hippt::max(absorption_at_distance, 1.0e-4f);\n        absorption_color = ColorRGB32F::max(absorption_color, ColorRGB32F(1.0f / 512.0f));\n        absorption_color.clamp(0.0f, 1.0f);\n\n        dispersion_abbe_number = hippt::max(1.0e-5f, dispersion_abbe_number);\n        dispersion_scale = hippt::clamp(0.0f, 1.0f, dispersion_scale);\n\n        thin_film = hippt::clamp(0.0f, 1.0f, thin_film);\n        thin_film_hue_shift_degrees = hippt::clamp(0.0f, 360.0f, thin_film_hue_shift_degrees);\n        thin_film_ior = hippt::max(1.0005f, thin_film_ior);\n\n        alpha_opacity = hippt::clamp(0.0f, 1.0f, alpha_opacity);\n\n        dielectric_priority = hippt::clamp(0, (int)StackPriorityEntry::PRIORITY_BIT_MASK >> StackPriorityEntry::PRIORITY_BIT_SHIFT, dielectric_priority);\n\n        // Clamping to avoid negative emission\n        emission = ColorRGB32F::max(ColorRGB32F(0.0f), emission);\n\n        if (specular_transmission == 0.0f && diffuse_transmission == 0.0f)\n            // No transmission means that we should never skip this boundary --> max priority\n            dielectric_priority = (1 << StackPriorityEntry::PRIORITY_MAXIMUM) - 1;\n    }\n\n    ColorRGB32F emission = ColorRGB32F{ 0.0f, 0.0f, 0.0f };\n    float emission_strength = 1.0f; // This factor is baked into 'emission' before being sent to the GPU\n    float global_emissive_factor = 1.0f; 
// This factor is baked into 'emission' before being sent to the GPU\n    bool emissive_texture_used = false;\n\n    ColorRGB32F base_color = ColorRGB32F(1.0f);\n\n    float roughness = 0.3f;\n    float oren_nayar_sigma = 0.34906585039886591538f; // 20 degrees standard deviation in radian\n\n    // Parameters for Adobe 2023 F82-tint model\n    float metallic = 0.0f;\n    float metallic_F90_falloff_exponent = 5.0f;\n    // F0 is not here as it uses the 'base_color' of the material\n    ColorRGB32F metallic_F82 = ColorRGB32F(1.0f);\n    ColorRGB32F metallic_F90 = ColorRGB32F(1.0f);\n    float anisotropy = 0.0f;\n    float anisotropy_rotation = 0.0f;\n    float second_roughness_weight = 0.0f;\n    float second_roughness = 0.5f;\n    // Whether or not to do energy compensation of the metallic layer\n    // for that material\n    bool do_metallic_energy_compensation = true;\n\n    // Specular intensity\n    float specular = 1.0f;\n    // Specular tint intensity. \n    // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n    float specular_tint = 1.0f;\n    ColorRGB32F specular_color = ColorRGB32F(1.0f);\n    // Same as coat darkening but for total internal reflection inside the specular layer\n    // that sits on top of the diffuse base\n    // \n    // Disabled by default for \"artistic expectations\" but this is not physically accurate\n    float specular_darkening = 0.0f;\n    // Whether or not to do energy compensation of the specular/diffuse layer\n    // for that material\n    bool do_specular_energy_compensation = true;\n\n    float coat = 0.0f;\n    ColorRGB32F coat_medium_absorption = ColorRGB32F{ 1.0f, 1.0f, 1.0f };\n    // The coat thickness influences the amount of absorption (given by 'coat_medium_absorption')\n    // that will happen inside the coat\n    float coat_medium_thickness = 5.0f;\n    float coat_roughness = 0.0f;\n    // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n    // i.e. 
the specular/metallic/transmission layers.\n    // \n    // The option is however given here to artistically disable\n    // that behavior by using coat roughening = 0.0f.\n    float coat_roughening = 1.0f;\n    // Because of the total internal reflection that can happen inside the coat layer (i.e.\n    // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n    // clearcoat will appear with increased saturation.\n    float coat_darkening = 1.0f;\n    float coat_anisotropy = 0.0f;\n    float coat_anisotropy_rotation = 0.0f;\n    float coat_ior = 1.5f;\n    // Whether or not to do energy compensation of the clearcoat layer\n    // for that material\n    bool do_coat_energy_compensation = true;\n\n    float sheen = 0.0f; // Sheen strength\n    float sheen_roughness = 0.5f;\n    ColorRGB32F sheen_color = ColorRGB32F(1.0f);\n\n    float ior = 1.40f;\n    float specular_transmission = 0.0f;\n    float diffuse_transmission = 0.0f;\n    // At what distance is the light absorbed to the given absorption_color\n    float absorption_at_distance = 1.0f;\n    // Color of the light absorption when traveling through the medium\n    ColorRGB32F absorption_color = ColorRGB32F(1.0f);\n    float dispersion_scale = 0.0f;\n    float dispersion_abbe_number = 20.0f;\n    bool thin_walled = false;\n    // Whether or not to do energy compensation of the glass layer\n    // for that material\n    bool do_glass_energy_compensation = true;\n\n    float thin_film = 0.0f;\n    float thin_film_ior = 1.3f;\n    float thin_film_thickness = 500.0f;\n    float thin_film_kappa_3 = 0.0f;\n    float thin_film_hue_shift_degrees = 0.0f;\n    float thin_film_base_ior_override = 1.0f;\n    bool thin_film_do_ior_override = false;\n\n    // 1.0f makes the material completely opaque\n    // 0.0f completely transparent (becomes invisible)\n    float alpha_opacity = 1.0f;\n\n    // Nested dielectric parameter\n    int dielectric_priority = 0;\n\n\n\n\n\n    int 
normal_map_texture_index = MaterialConstants::NO_TEXTURE;\n\n    int emission_texture_index = MaterialConstants::NO_TEXTURE;\n    int base_color_texture_index = MaterialConstants::NO_TEXTURE;\n\n    // If not MaterialConstants::NO_TEXTURE, there is only one texture for the metallic and the roughness parameters in which.\n    // case the green channel is the roughness and the blue channel is the metalness\n    int roughness_metallic_texture_index = MaterialConstants::NO_TEXTURE;\n    int roughness_texture_index = MaterialConstants::NO_TEXTURE;\n    int metallic_texture_index = MaterialConstants::NO_TEXTURE;\n    int anisotropic_texture_index = MaterialConstants::NO_TEXTURE;\n\n    int specular_texture_index = MaterialConstants::NO_TEXTURE;\n    int coat_texture_index = MaterialConstants::NO_TEXTURE;\n    int sheen_texture_index = MaterialConstants::NO_TEXTURE;\n    int specular_transmission_texture_index = MaterialConstants::NO_TEXTURE;\n};\n\n#endif // #ifndef  __KERNELCC__\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialConstants.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_CONSTANTS_H\n#define HOST_DEVICE_COMMON_MATERIAL_CONSTANTS_H\n\nstruct MaterialConstants\n{\n\tstatic constexpr int NO_TEXTURE = 65535;\n\t// When an emissive texture is read and is determine to be\n\t// constant, no emissive texture will be used. Instead,\n\t// we'll just set the emission of the material to that constant emission value\n\t// and the emissive texture index of the material will be replaced by\n\t// CONSTANT_EMISSIVE_TEXTURE\n\tstatic constexpr int CONSTANT_EMISSIVE_TEXTURE = 65534;\n\t// Maximum number of different textures per scene\n\tstatic constexpr int MAX_TEXTURE_COUNT = 65533;\n\n\tstatic constexpr float ROUGHNESS_CLAMP = 1.0e-4f;\n\tstatic constexpr float PERFECTLY_SMOOTH_ROUGHNESS_THRESHOLD = 1.0e-2f;\n\tstatic constexpr float DELTA_DISTRIBUTION_HIGH_VALUE = 1.0e9f;\n\tstatic constexpr float DELTA_DISTRIBUTION_ALIGNEMENT_THRESHOLD = 0.999999f;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialPacked.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_PACKED_H\n#define HOST_DEVICE_COMMON_MATERIAL_PACKED_H\n\n#include \"HostDeviceCommon/Packing.h\"\n#include \"HostDeviceCommon/Material/MaterialConstants.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n\n /**\n  * Packed material for use in the shaders\n  */\nstruct DevicePackedEffectiveMaterial\n{\n    enum PackedFlagsIndices : unsigned char\n    {\n        PACKED_THIN_WALLED = 0,\n        PACKED_EMISSIVE_TEXTURE_USED = 1,\n        PACKED_THIN_FILM_DO_IOR_OVERRIDE = 2,\n        GLASS_ENERGY_COMPENSATION = 3,\n        CLEARCOAT_ENERGY_COMPENSATION = 4,\n        METALLIC_ENERGY_COMPENSATION = 5,\n        SPECULAR_ENERGY_COMPENSATION = 6,\n        PACKED_ENFORCE_STRONG_ENERGY_CONSERVATION = 7,\n    };\n\n    enum PackedAnisotropyGroupIndices : unsigned char\n    {\n        PACKED_ANISOTROPY = 0,\n        PACKED_ANISOTROPY_ROTATION = 1,\n        PACKED_SECOND_ROUGHNESS_WEIGHT = 2,\n        PACKED_SECOND_ROUGHNESS = 3,\n    };\n\n    enum PackedSpecularGroupIndices : unsigned char\n    {\n        PACKED_SPECULAR = 0,\n        PACKED_SPECULAR_DARKENING = 1,\n        PACKED_COAT_ROUGHNESS = 2\n    };\n\n    enum PackedCoatGroupIndices : unsigned char\n    {\n        PACKED_COAT_ROUGHENING = 0,\n        PACKED_COAT_DARKENING = 1,\n        PACKED_COAT_ANISOTROPY = 2,\n        PACKED_COAT_ANISOTROPY_ROTATION = 3,\n    };\n\n    enum PackedSheenRoughnessGroupIndices : unsigned char\n    {\n        PACKED_SHEEN_ROUGHNESS = 0,\n        PACKED_SPECULAR_TRANSMISSION = 1,\n        PACKED_DISPERSION_SCALE = 2,\n        PACKED_THIN_FILM = 3,\n    };\n\n    enum PackedAlphaOpacityGroupIndices : unsigned char\n    {\n        PACKED_ALPHA_OPACITY = 0,\n        PACKED_THIN_FILM_HUE_SHIFT = 1,\n        PACKED_DIELECTRIC_PRIORITY = 0,\n        PACKED_ENERGY_PRESERVATION_SAMPLES = 1,\n    
};\n\n    HIPRT_HOST_DEVICE bool is_emissive() const\n    {\n        return !hippt::is_zero(emission.r)\n            || !hippt::is_zero(emission.g)\n            || !hippt::is_zero(emission.b)\n            || get_emissive_texture_used();\n    }\n\n    /**\n     * This function packs an UnpackedEffectiveMaterial into its packed version.\n     * \n     * This is used in the shaders when a material is read after hitting some geometry: \n     * the texture of the material will be evaluated, transforming a \n     * DeviceUnpackedTexturedMaterial into a DeviceUnpackedEffectiveMaterial.\n     * \n     * That DeviceUnpackedEffectiveMaterial will then be packed (using the pack() function below)\n     * before being written to the G-buffer\n     */\n    HIPRT_HOST_DEVICE static DevicePackedEffectiveMaterial pack(const DeviceUnpackedEffectiveMaterial& unpacked)\n    {\n        DevicePackedEffectiveMaterial packed;\n\n        packed.set_emission(unpacked.emission);\n        packed.set_emissive_texture_used(unpacked.emissive_texture_used);\n\n        packed.set_base_color(unpacked.base_color);\n\n        packed.set_roughness(unpacked.roughness);\n        packed.set_oren_nayar_sigma(unpacked.oren_nayar_sigma);\n\n        packed.set_metallic(unpacked.metallic);\n        packed.set_metallic_F90_falloff_exponent(unpacked.metallic_F90_falloff_exponent);\n        packed.set_metallic_F82(unpacked.metallic_F82);\n        packed.set_metallic_F90(unpacked.metallic_F90);\n        packed.set_anisotropy(unpacked.anisotropy);\n        packed.set_anisotropy_rotation(unpacked.anisotropy_rotation);\n        packed.set_second_roughness_weight(unpacked.second_roughness_weight);\n        packed.set_second_roughness(unpacked.second_roughness);\n        packed.set_metallic_energy_compensation(unpacked.do_metallic_energy_compensation);\n\n        packed.set_specular(unpacked.specular);\n        packed.set_specular_tint(unpacked.specular_tint);\n        
packed.set_specular_color(unpacked.specular_color);\n        packed.set_specular_darkening(unpacked.specular_darkening);\n        packed.set_specular_energy_compensation(unpacked.do_specular_energy_compensation);\n\n        packed.set_coat(unpacked.coat);\n        packed.set_coat_medium_absorption(unpacked.coat_medium_absorption);\n        packed.set_coat_medium_thickness(unpacked.coat_medium_thickness);\n        packed.set_coat_roughness(unpacked.coat_roughness);\n        packed.set_coat_roughening(unpacked.coat_roughening);\n        packed.set_coat_darkening(unpacked.coat_darkening);\n        packed.set_coat_anisotropy(unpacked.coat_anisotropy);\n        packed.set_coat_anisotropy_rotation(unpacked.coat_anisotropy_rotation);\n        packed.set_coat_ior(unpacked.coat_ior);\n        packed.set_coat_energy_compensation(unpacked.do_coat_energy_compensation);\n\n        packed.set_sheen(unpacked.sheen);\n        packed.set_sheen_roughness(unpacked.sheen_roughness);\n        packed.set_sheen_color(unpacked.sheen_color);\n\n        packed.set_ior(unpacked.ior);\n        packed.set_specular_transmission(unpacked.specular_transmission);\n        packed.set_diffuse_transmission(unpacked.diffuse_transmission);\n\n        packed.set_absorption_at_distance(unpacked.absorption_at_distance);\n        packed.set_absorption_color(unpacked.absorption_color);\n        packed.set_dispersion_scale(unpacked.dispersion_scale);\n        packed.set_dispersion_abbe_number(unpacked.dispersion_abbe_number);\n        packed.set_thin_walled(unpacked.thin_walled);\n        packed.set_glass_energy_compensation(unpacked.do_glass_energy_compensation);\n\n        packed.set_thin_film(unpacked.thin_film);\n        packed.set_thin_film_ior(unpacked.thin_film_ior);\n        packed.set_thin_film_thickness(unpacked.thin_film_thickness);\n        packed.set_thin_film_kappa_3(unpacked.thin_film_kappa_3);\n        packed.set_thin_film_hue_shift_degrees(unpacked.thin_film_hue_shift_degrees);\n        
packed.set_thin_film_base_ior_override(unpacked.thin_film_base_ior_override);\n        packed.set_thin_film_do_ior_override(unpacked.thin_film_do_ior_override);\n\n        packed.set_alpha_opacity(unpacked.alpha_opacity);\n        packed.set_dielectric_priority(unpacked.get_dielectric_priority());\n        packed.set_energy_preservation_monte_carlo_samples(unpacked.energy_preservation_monte_carlo_samples);\n        packed.set_enforce_strong_energy_conservation(unpacked.enforce_strong_energy_conservation);\n\n        return packed;\n    }\n\n    HIPRT_HOST_DEVICE DeviceUnpackedEffectiveMaterial unpack() const\n    {\n        DeviceUnpackedEffectiveMaterial unpacked;\n\n        unpacked.emission = this->get_emission();\n        unpacked.emissive_texture_used = this->get_emissive_texture_used();\n\n        unpacked.base_color = this->get_base_color();\n\n        unpacked.roughness = this->get_roughness();\n        unpacked.oren_nayar_sigma = this->get_oren_nayar_sigma();\n\n        unpacked.metallic = this->get_metallic();\n        unpacked.metallic_F90_falloff_exponent = this->get_metallic_F90_falloff_exponent();\n        unpacked.metallic_F82 = this->get_metallic_F82();\n        unpacked.metallic_F90 = this->get_metallic_F90();\n        unpacked.anisotropy = this->get_anisotropy();\n        unpacked.anisotropy_rotation = this->get_anisotropy_rotation();\n        unpacked.second_roughness_weight = this->get_second_roughness_weight();\n        unpacked.second_roughness = this->get_second_roughness();\n        unpacked.do_metallic_energy_compensation = this->get_do_metallic_energy_compensation();\n\n        unpacked.specular = this->get_specular();\n        unpacked.specular_tint = this->get_specular_tint();\n        unpacked.specular_color = this->get_specular_color();\n        unpacked.specular_darkening = this->get_specular_darkening();\n        unpacked.do_specular_energy_compensation = this->get_do_specular_energy_compensation();\n\n        unpacked.coat = 
this->get_coat();\n        unpacked.coat_medium_absorption = this->get_coat_medium_absorption();\n        unpacked.coat_medium_thickness = this->get_coat_medium_thickness();\n        unpacked.coat_roughness = this->get_coat_roughness();\n        unpacked.coat_roughening = this->get_coat_roughening();\n        unpacked.coat_darkening = this->get_coat_darkening();\n        unpacked.coat_anisotropy = this->get_coat_anisotropy();\n        unpacked.coat_anisotropy_rotation = this->get_coat_anisotropy_rotation();\n        unpacked.coat_ior = this->get_coat_ior();\n        unpacked.do_coat_energy_compensation = this->get_do_coat_energy_compensation();\n\n        unpacked.sheen = this->get_sheen();\n        unpacked.sheen_roughness = this->get_sheen_roughness();\n        unpacked.sheen_color = this->get_sheen_color();\n\n        unpacked.ior = this->get_ior();\n        unpacked.specular_transmission = this->get_specular_transmission();\n        unpacked.diffuse_transmission = this->get_diffuse_transmission();\n\n        unpacked.absorption_at_distance = this->get_absorption_at_distance();\n        unpacked.absorption_color = this->get_absorption_color();\n        unpacked.dispersion_scale = this->get_dispersion_scale();\n        unpacked.dispersion_abbe_number = this->get_dispersion_abbe_number();\n        unpacked.thin_walled = this->get_thin_walled();\n        unpacked.do_glass_energy_compensation = this->get_do_glass_energy_compensation();\n\n        unpacked.thin_film = this->get_thin_film();\n        unpacked.thin_film_ior = this->get_thin_film_ior();\n        unpacked.thin_film_thickness = this->get_thin_film_thickness();\n        unpacked.thin_film_kappa_3 = this->get_thin_film_kappa_3();\n        unpacked.thin_film_hue_shift_degrees = this->get_thin_film_hue_shift_degrees();\n        unpacked.thin_film_base_ior_override = this->get_thin_film_base_ior_override();\n        unpacked.thin_film_do_ior_override = this->get_thin_film_do_ior_override();\n\n        
unpacked.alpha_opacity = this->get_alpha_opacity();\n        unpacked.set_dielectric_priority(this->get_dielectric_priority());\n        unpacked.energy_preservation_monte_carlo_samples = this->get_energy_preservation_monte_carlo_samples();\n        unpacked.enforce_strong_energy_conservation = this->get_enforce_strong_energy_conservation();\n\n        return unpacked;\n    }\n\n    HIPRT_HOST_DEVICE ColorRGB32F get_emission() const { return this->emission; }\n    HIPRT_HOST_DEVICE bool get_emissive_texture_used() const { return flags.get_bool<PackedFlagsIndices::PACKED_EMISSIVE_TEXTURE_USED>(); }\n\n    HIPRT_HOST_DEVICE ColorRGB32F get_base_color() const { return base_color_roughness.get_color(); }\n\n    HIPRT_HOST_DEVICE float get_roughness() const { return base_color_roughness.get_float(); }\n    HIPRT_HOST_DEVICE float get_oren_nayar_sigma() const { return this->oren_nayar_sigma; }\n\n    HIPRT_HOST_DEVICE float get_metallic() const { return metallic_F90_and_metallic.get_float(); }\n    HIPRT_HOST_DEVICE float get_metallic_F90_falloff_exponent() const { return this->metallic_F90_falloff_exponent; }\n    HIPRT_HOST_DEVICE ColorRGB32F get_metallic_F82() const { return metallic_F82_packed_and_diffuse_transmission.get_color(); }\n    HIPRT_HOST_DEVICE ColorRGB32F get_metallic_F90() const { return metallic_F90_and_metallic.get_color(); }\n    HIPRT_HOST_DEVICE float get_anisotropy() const { return anisotropy_and_rotation_and_second_roughness.get_float<PackedAnisotropyGroupIndices::PACKED_ANISOTROPY>(); }\n    HIPRT_HOST_DEVICE float get_anisotropy_rotation() const { return anisotropy_and_rotation_and_second_roughness.get_float<PackedAnisotropyGroupIndices::PACKED_ANISOTROPY_ROTATION>(); }\n    HIPRT_HOST_DEVICE float get_second_roughness_weight() const { return anisotropy_and_rotation_and_second_roughness.get_float<PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS_WEIGHT>(); }\n    HIPRT_HOST_DEVICE float get_second_roughness() const { return 
anisotropy_and_rotation_and_second_roughness.get_float<PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS>(); }\n    HIPRT_HOST_DEVICE bool get_do_metallic_energy_compensation() const { return flags.get_bool<PackedFlagsIndices::METALLIC_ENERGY_COMPENSATION>(); }\n\n    HIPRT_HOST_DEVICE float get_specular() const { return specular_and_darkening_and_coat_roughness.get_float<PackedSpecularGroupIndices::PACKED_SPECULAR>(); }\n    HIPRT_HOST_DEVICE float get_specular_tint() const { return specular_color_and_tint_factor.get_float(); }\n    HIPRT_HOST_DEVICE ColorRGB32F get_specular_color() const { return specular_color_and_tint_factor.get_color(); }\n    HIPRT_HOST_DEVICE float get_specular_darkening() const { return specular_and_darkening_and_coat_roughness.get_float<PackedSpecularGroupIndices::PACKED_SPECULAR_DARKENING>(); }\n    HIPRT_HOST_DEVICE bool get_do_specular_energy_compensation() const { return flags.get_bool<PackedFlagsIndices::SPECULAR_ENERGY_COMPENSATION>(); }\n\n    HIPRT_HOST_DEVICE float get_coat() const { return coat_and_medium_absorption.get_float(); }\n    HIPRT_HOST_DEVICE ColorRGB32F get_coat_medium_absorption() const { return coat_and_medium_absorption.get_color(); }\n    HIPRT_HOST_DEVICE float get_coat_medium_thickness() const { return this->coat_medium_thickness; }\n    HIPRT_HOST_DEVICE float get_coat_roughness() const { return specular_and_darkening_and_coat_roughness.get_float<PackedSpecularGroupIndices::PACKED_COAT_ROUGHNESS>(); }\n    HIPRT_HOST_DEVICE float get_coat_roughening() const { return coat_roughening_darkening_anisotropy_and_rotation.get_float<PackedCoatGroupIndices::PACKED_COAT_ROUGHENING>(); }\n    HIPRT_HOST_DEVICE float get_coat_darkening() const { return coat_roughening_darkening_anisotropy_and_rotation.get_float<PackedCoatGroupIndices::PACKED_COAT_DARKENING>(); }\n    HIPRT_HOST_DEVICE float get_coat_anisotropy() const { return 
coat_roughening_darkening_anisotropy_and_rotation.get_float<PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY>(); }\n    HIPRT_HOST_DEVICE float get_coat_anisotropy_rotation() const { return coat_roughening_darkening_anisotropy_and_rotation.get_float<PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY_ROTATION>(); }\n    HIPRT_HOST_DEVICE float get_coat_ior() const { return this->coat_ior; }\n    HIPRT_HOST_DEVICE bool get_do_coat_energy_compensation() const { return flags.get_bool<PackedFlagsIndices::CLEARCOAT_ENERGY_COMPENSATION>(); }\n\n    HIPRT_HOST_DEVICE float get_sheen() const { return sheen_and_color.get_float(); }\n    HIPRT_HOST_DEVICE float get_sheen_roughness() const { return sheen_roughness_transmission_dispersion_thin_film.get_float<PackedSheenRoughnessGroupIndices::PACKED_SHEEN_ROUGHNESS>(); }\n    HIPRT_HOST_DEVICE ColorRGB32F get_sheen_color() const { return sheen_and_color.get_color(); }\n\n    HIPRT_HOST_DEVICE float get_ior() const { return this->ior; }\n    HIPRT_HOST_DEVICE float get_specular_transmission() const { return sheen_roughness_transmission_dispersion_thin_film.get_float<PackedSheenRoughnessGroupIndices::PACKED_SPECULAR_TRANSMISSION>(); }\n    HIPRT_HOST_DEVICE float get_diffuse_transmission() const { return metallic_F82_packed_and_diffuse_transmission.get_float(); }\n    HIPRT_HOST_DEVICE float get_absorption_at_distance() const { return this->absorption_at_distance; }\n    HIPRT_HOST_DEVICE ColorRGB32F get_absorption_color() const { return absorption_color_packed.get_color(); }\n    HIPRT_HOST_DEVICE float get_dispersion_scale() const { return sheen_roughness_transmission_dispersion_thin_film.get_float<PackedSheenRoughnessGroupIndices::PACKED_DISPERSION_SCALE>(); }\n    HIPRT_HOST_DEVICE float get_dispersion_abbe_number() const { return this->dispersion_abbe_number; }\n    HIPRT_HOST_DEVICE bool get_thin_walled() const { return flags.get_bool<PackedFlagsIndices::PACKED_THIN_WALLED >(); }\n    HIPRT_HOST_DEVICE bool 
get_do_glass_energy_compensation() const { return flags.get_bool<PackedFlagsIndices::GLASS_ENERGY_COMPENSATION>(); }\n\n    HIPRT_HOST_DEVICE float get_thin_film() const { return sheen_roughness_transmission_dispersion_thin_film.get_float<PackedSheenRoughnessGroupIndices::PACKED_THIN_FILM>(); }\n    HIPRT_HOST_DEVICE float get_thin_film_ior() const { return this->thin_film_ior; }\n    HIPRT_HOST_DEVICE float get_thin_film_thickness() const { return this->thin_film_thickness; }\n    HIPRT_HOST_DEVICE float get_thin_film_kappa_3() const { return this->thin_film_kappa_3; }\n    HIPRT_HOST_DEVICE float get_thin_film_hue_shift_degrees() const { return alpha_thin_film_hue_dielectric_priority.get_float<PackedAlphaOpacityGroupIndices::PACKED_THIN_FILM_HUE_SHIFT>(); }\n    HIPRT_HOST_DEVICE float get_thin_film_base_ior_override() const { return this->thin_film_base_ior_override; }\n    HIPRT_HOST_DEVICE bool get_thin_film_do_ior_override() const { return flags.get_bool<PackedFlagsIndices::PACKED_THIN_FILM_DO_IOR_OVERRIDE>(); }\n\n    HIPRT_HOST_DEVICE float get_alpha_opacity() const { return alpha_thin_film_hue_dielectric_priority.get_float<PackedAlphaOpacityGroupIndices::PACKED_ALPHA_OPACITY>(); }\n    HIPRT_HOST_DEVICE unsigned char get_dielectric_priority() const { return alpha_thin_film_hue_dielectric_priority.get_uchar<PackedAlphaOpacityGroupIndices::PACKED_DIELECTRIC_PRIORITY>(); }\n\n    HIPRT_HOST_DEVICE unsigned char get_energy_preservation_monte_carlo_samples() const { return alpha_thin_film_hue_dielectric_priority.get_uchar<PackedAlphaOpacityGroupIndices::PACKED_ENERGY_PRESERVATION_SAMPLES>(); }\n    HIPRT_HOST_DEVICE bool get_enforce_strong_energy_conservation() const { return flags.get_bool<PackedFlagsIndices::PACKED_ENFORCE_STRONG_ENERGY_CONSERVATION>(); }\n\n\n\n\n    HIPRT_HOST_DEVICE void set_emission(ColorRGB32F emission_) { this->emission = emission_; }\n    HIPRT_HOST_DEVICE void set_emissive_texture_used(bool emissive_texture_used) { 
flags.set_bool<PackedFlagsIndices::PACKED_EMISSIVE_TEXTURE_USED>(emissive_texture_used); }\n\n    HIPRT_HOST_DEVICE void set_base_color(ColorRGB32F base_color) { base_color_roughness.set_color(base_color); }\n\n    HIPRT_HOST_DEVICE void set_roughness(float roughness) { base_color_roughness.set_float(roughness); }\n    HIPRT_HOST_DEVICE void set_oren_nayar_sigma(float oren_nayar_sigma_) { this->oren_nayar_sigma = oren_nayar_sigma_; }\n\n    HIPRT_HOST_DEVICE void set_metallic(float metallic) { metallic_F90_and_metallic.set_float(metallic); }\n    HIPRT_HOST_DEVICE void set_metallic_F90_falloff_exponent(float metallic_F90_falloff_exponent_) { this->metallic_F90_falloff_exponent = metallic_F90_falloff_exponent_; }\n    HIPRT_HOST_DEVICE void set_metallic_F82(ColorRGB32F metallic_F82) { metallic_F82_packed_and_diffuse_transmission.set_color(metallic_F82); }\n    HIPRT_HOST_DEVICE void set_metallic_F90(ColorRGB32F metallic_F90) { metallic_F90_and_metallic.set_color(metallic_F90); }\n    HIPRT_HOST_DEVICE void set_anisotropy(float anisotropy) { anisotropy_and_rotation_and_second_roughness.set_float<PackedAnisotropyGroupIndices::PACKED_ANISOTROPY>(anisotropy); }\n    HIPRT_HOST_DEVICE void set_anisotropy_rotation(float anisotropy_rotation) { anisotropy_and_rotation_and_second_roughness.set_float<PackedAnisotropyGroupIndices::PACKED_ANISOTROPY_ROTATION>(anisotropy_rotation); }\n    HIPRT_HOST_DEVICE void set_second_roughness_weight(float second_roughness_weight) { anisotropy_and_rotation_and_second_roughness.set_float<PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS_WEIGHT>(second_roughness_weight); }\n    HIPRT_HOST_DEVICE void set_second_roughness(float second_roughness) { anisotropy_and_rotation_and_second_roughness.set_float<PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS>(second_roughness); }\n    HIPRT_HOST_DEVICE void set_metallic_energy_compensation(bool do_metallic_energy_compensation) { 
flags.set_bool<PackedFlagsIndices::METALLIC_ENERGY_COMPENSATION>(do_metallic_energy_compensation); }\n\n    HIPRT_HOST_DEVICE void set_specular(float specular) { specular_and_darkening_and_coat_roughness.set_float<PackedSpecularGroupIndices::PACKED_SPECULAR>(specular); }\n    HIPRT_HOST_DEVICE void set_specular_tint(float specular_tint) { specular_color_and_tint_factor.set_float(specular_tint); }\n    HIPRT_HOST_DEVICE void set_specular_color(ColorRGB32F specular_color) { specular_color_and_tint_factor.set_color(specular_color); }\n    HIPRT_HOST_DEVICE void set_specular_darkening(float specular_darkening) { specular_and_darkening_and_coat_roughness.set_float<PackedSpecularGroupIndices::PACKED_SPECULAR_DARKENING>(specular_darkening); }\n    HIPRT_HOST_DEVICE void set_specular_energy_compensation(bool do_specular_energy_compensation) { flags.set_bool<PackedFlagsIndices::SPECULAR_ENERGY_COMPENSATION>(do_specular_energy_compensation); }\n\n    HIPRT_HOST_DEVICE void set_coat(float coat) { coat_and_medium_absorption.set_float(coat); }\n    HIPRT_HOST_DEVICE void set_coat_medium_absorption(ColorRGB32F coat_medium_absorption) { coat_and_medium_absorption.set_color(coat_medium_absorption); }\n    HIPRT_HOST_DEVICE void set_coat_medium_thickness(float coat_medium_thickness_) { this->coat_medium_thickness = coat_medium_thickness_; }\n    HIPRT_HOST_DEVICE void set_coat_roughness(float coat_roughness) { specular_and_darkening_and_coat_roughness.set_float<PackedSpecularGroupIndices::PACKED_COAT_ROUGHNESS>(coat_roughness); }\n    HIPRT_HOST_DEVICE void set_coat_roughening(float coat_roughening) { coat_roughening_darkening_anisotropy_and_rotation.set_float<PackedCoatGroupIndices::PACKED_COAT_ROUGHENING>(coat_roughening); }\n    HIPRT_HOST_DEVICE void set_coat_darkening(float coat_darkening) { coat_roughening_darkening_anisotropy_and_rotation.set_float<PackedCoatGroupIndices::PACKED_COAT_DARKENING>(coat_darkening); }\n    HIPRT_HOST_DEVICE void set_coat_anisotropy(float 
coat_anisotropy) { coat_roughening_darkening_anisotropy_and_rotation.set_float<PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY>(coat_anisotropy); }\n    HIPRT_HOST_DEVICE void set_coat_anisotropy_rotation(float coat_anisotropy_rotation) { coat_roughening_darkening_anisotropy_and_rotation.set_float<PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY_ROTATION>(coat_anisotropy_rotation); }\n    HIPRT_HOST_DEVICE void set_coat_ior(float coat_ior_) { this->coat_ior = coat_ior_; }\n    HIPRT_HOST_DEVICE void set_coat_energy_compensation(bool do_coat_energy_compensation) { flags.set_bool<PackedFlagsIndices::CLEARCOAT_ENERGY_COMPENSATION>(do_coat_energy_compensation); }\n\n    HIPRT_HOST_DEVICE void set_sheen(float sheen) { sheen_and_color.set_float(sheen); }\n    HIPRT_HOST_DEVICE void set_sheen_roughness(float sheen_roughness) { sheen_roughness_transmission_dispersion_thin_film.set_float<PackedSheenRoughnessGroupIndices::PACKED_SHEEN_ROUGHNESS>(sheen_roughness); }\n    HIPRT_HOST_DEVICE void set_sheen_color(ColorRGB32F sheen_color) { sheen_and_color.set_color(sheen_color); }\n\n    HIPRT_HOST_DEVICE void set_ior(float ior_) { this->ior = ior_; }\n    HIPRT_HOST_DEVICE void set_specular_transmission(float specular_transmission) { sheen_roughness_transmission_dispersion_thin_film.set_float<PackedSheenRoughnessGroupIndices::PACKED_SPECULAR_TRANSMISSION>(specular_transmission); }\n    HIPRT_HOST_DEVICE void set_diffuse_transmission(float diffuse_transmission) { metallic_F82_packed_and_diffuse_transmission.set_float(diffuse_transmission); }\n    HIPRT_HOST_DEVICE void set_absorption_at_distance(float absorption_at_distance_) { this->absorption_at_distance = absorption_at_distance_; }\n    HIPRT_HOST_DEVICE void set_absorption_color(ColorRGB32F absorption_color) { absorption_color_packed.set_color(absorption_color); }\n    HIPRT_HOST_DEVICE void set_dispersion_scale(float dispersion_scale) { 
sheen_roughness_transmission_dispersion_thin_film.set_float<PackedSheenRoughnessGroupIndices::PACKED_DISPERSION_SCALE>(dispersion_scale); }\n    HIPRT_HOST_DEVICE void set_dispersion_abbe_number(float dispersion_abbe_number_) { this->dispersion_abbe_number = dispersion_abbe_number_; }\n    HIPRT_HOST_DEVICE void set_thin_walled(bool thin_walled) { flags.set_bool<PackedFlagsIndices::PACKED_THIN_WALLED >(thin_walled); }\n    HIPRT_HOST_DEVICE void set_glass_energy_compensation(bool do_glass_energy_compensation) { flags.set_bool<PackedFlagsIndices::GLASS_ENERGY_COMPENSATION>(do_glass_energy_compensation); }\n\n    HIPRT_HOST_DEVICE void set_thin_film(float thin_film) { sheen_roughness_transmission_dispersion_thin_film.set_float<PackedSheenRoughnessGroupIndices::PACKED_THIN_FILM>(thin_film); }\n    HIPRT_HOST_DEVICE void set_thin_film_ior(float thin_film_ior_) { this->thin_film_ior = thin_film_ior_; }\n    HIPRT_HOST_DEVICE void set_thin_film_thickness(float thin_film_thickness_) { this->thin_film_thickness = thin_film_thickness_; }\n    HIPRT_HOST_DEVICE void set_thin_film_kappa_3(float thin_film_kappa_3_) { this->thin_film_kappa_3 = thin_film_kappa_3_; }\n    HIPRT_HOST_DEVICE void set_thin_film_hue_shift_degrees(float thin_film_hue_shift_degrees) { alpha_thin_film_hue_dielectric_priority.set_float<PackedAlphaOpacityGroupIndices::PACKED_THIN_FILM_HUE_SHIFT>(thin_film_hue_shift_degrees); }\n    HIPRT_HOST_DEVICE void set_thin_film_base_ior_override(float thin_film_base_ior_override_) { this->thin_film_base_ior_override = thin_film_base_ior_override_; }\n    HIPRT_HOST_DEVICE void set_thin_film_do_ior_override(bool thin_film_do_ior_override) { flags.set_bool<PackedFlagsIndices::PACKED_THIN_FILM_DO_IOR_OVERRIDE>(thin_film_do_ior_override); }\n\n    HIPRT_HOST_DEVICE void set_alpha_opacity(float alpha_opacity) { alpha_thin_film_hue_dielectric_priority.set_float<PackedAlphaOpacityGroupIndices::PACKED_ALPHA_OPACITY>(alpha_opacity); }\n    HIPRT_HOST_DEVICE void 
set_dielectric_priority(unsigned char dielectric_priority) { alpha_thin_film_hue_dielectric_priority.set_uchar<PackedAlphaOpacityGroupIndices::PACKED_DIELECTRIC_PRIORITY>(dielectric_priority); }\n\n    HIPRT_HOST_DEVICE void set_energy_preservation_monte_carlo_samples(unsigned char energy_preservation_monte_carlo_samples) { alpha_thin_film_hue_dielectric_priority.set_uchar<PackedAlphaOpacityGroupIndices::PACKED_ENERGY_PRESERVATION_SAMPLES>(energy_preservation_monte_carlo_samples); }\n    HIPRT_HOST_DEVICE void set_enforce_strong_energy_conservation(bool enforce_strong_energy_conservation) { flags.set_bool<PackedFlagsIndices::PACKED_ENFORCE_STRONG_ENERGY_CONSERVATION>(enforce_strong_energy_conservation); }\n\nprivate:\n    friend class DevicePackedTexturedMaterialSoAGPUData;\n    friend class DevicePackedTexturedMaterialSoACPUData;\n\n    // Packed flags of the material:\n    //  - thin_walled\n    //      Is the material thin walled? i.e. it doesn't have an interior and light doesn't\n    //      bend as it goes through\n    // \n    //  - emissive_texture_used\n    //      Does the material use an emissive texture?\n    // \n    //  - thin_film_do_ior_override\n    //      Whether or not to override the IORs used for the base material on top of which\n    //      the thin film sits.\n    // \n    //  - enforce_strong_energy_conservation\n    //      If true, 'energy_preservation_monte_carlo_samples' will be used\n    //      to compute the directional albedo of this material.\n    //      This computed directional albedo is then used to ensure perfect energy conservation\n    //      and preservation. 
\n    // \n    //      This is however very expensive.\n    //      This is usually only needed on clearcoated materials (but even then, the energy loss due to the absence of multiple scattering between\n    //      the clearcoat layer and the BSDF below may be acceptable).\n    // \n    //      Non-clearcoated materials can already ensure perfect (modulo implementation quality) energy \n    //      conservation/preservation with the precomputed LUTs [Turquin, 2019]. \n    // \n    //      See PrincipledBSDFDoEnergyCompensation in this codebase.\n    //\n    //      Values from the 'PackedFlagsIndices' enum should be used\n    //      to retrieve/set from the packed flags\n    UChar8BoolsPacked flags;\n\n    // Full range emission\n    ColorRGB32F emission = ColorRGB32F{ 0.0f, 0.0f, 0.0f };\n\n    // Base color RGB 3x8 bits + roughness uchar [float in [0,1] packed in 8 bit]\n    ColorRGB24bFloat0_1Packed base_color_roughness;\n\n    float oren_nayar_sigma = 0.34906585039886591538f; // 20 degrees standard deviation in radian\n\n    // Parameters for Adobe 2023 F82-tint model\n    // Packs the SDR F90 color and the metalness parameter\n    ColorRGB24bFloat0_1Packed metallic_F90_and_metallic;\n    ColorRGB24bFloat0_1Packed metallic_F82_packed_and_diffuse_transmission;\n    float metallic_F90_falloff_exponent = 5.0f;\n\n    Float4xPacked anisotropy_and_rotation_and_second_roughness;\n\n    // Packed specular color and the intensity of the tint\n    // \n    // Specular tint intensity: Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n    ColorRGB24bFloat0_1Packed specular_color_and_tint_factor;\n\n    // Packed:\n    //  - specular_darkening\n    //      Same as coat darkening but for total internal reflection inside the specular layer\n    //      that sits on top of the diffuse base\n    //\n    //      Disabled by default for artistic \"expectations\"\n    //\n    //  - Specular\n    //      Specular intensity\n    //\n    //  - Coat roughness\n   
 //      Roughness of the coat \n    // TODO: PACKED 1 FLOAT IS UNUSED IN HERE\n    Float4xPacked specular_and_darkening_and_coat_roughness;\n    float coat_medium_thickness = 5.0f;\n\n    // Packed:\n    //  - Coat\n    //      Intensity of the coat. 0.0f disables the coating\n    //\n    //  - Coat medium absorption color \n    ColorRGB24bFloat0_1Packed coat_and_medium_absorption;\n\n    // Packed:\n    //  - Coat roughening\n    //      Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n    //      i.e. the specular/metallic/transmission layers.\n    // \n    //      The option is however given here to artistically disable\n    //      that behavior by using coat roughening = 0.0f.\n    //\n    //  - Coat darkening\n    //      Because of the total internal reflection that can happen inside the coat layer (i.e.\n    //      light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n    //      clearcoat will appear will increased saturation.\n    //     \n    //  - Coat anisotropy\n    //  - Coat anisotropy rotation\n    Float4xPacked coat_roughening_darkening_anisotropy_and_rotation;\n    float coat_ior = 1.5f;\n\n    // Packed:\n    //  - Sheen intensity. 0.0f disables the sheen effect\n    //\n    //  - Sheen color\n    ColorRGB24bFloat0_1Packed sheen_and_color;\n\n    // IOR of the base material\n    float ior = 1.40f;\n\n    // Packed:\n    //  - Absorption color\n    //      Color of the light absorption when traveling through the medium\n    // TODO: PACKED FLOAT IS UNUSED IN HERE\n    ColorRGB24bFloat0_1Packed absorption_color_packed;\n    float absorption_at_distance = 5.0f;\n\n    // Packed:\n    //  - Sheen roughness\n    // \n    //  - Specular transmission\n    //      How much light is transmitted through the material. 
This essentially controls the glass lobe\n    //\n    //  - Dispersion scale\n    //      Intensity of the dispersion effect in glass objects\n    //\n    //  - Thin film\n    //      Intensity of the thin-film effect\n    Float4xPacked sheen_roughness_transmission_dispersion_thin_film;\n\n    float dispersion_abbe_number = 20.0f;\n    float thin_film_ior = 1.3f;\n    float thin_film_thickness = 500.0f;\n    float thin_film_kappa_3 = 0.0f;\n    float thin_film_base_ior_override = 1.0f;\n\n    // Packed:\n    //  - Alpha opacity\n    //      1.0f makes the material completely opaque\n    //      0.0f completely transparent (becomes invisible)\n    //\n    //  - Thin film hue shift in degrees\n    // \n    //  - Dielectric priority\n    //      Nested dielectric with priority parameter\n    //\n    //  - Energy preservation samples\n    //      How many samples will be computed for the integration of the directional\n    //      when the strong energy preservation/conservation of the material is enabled\n    Float2xUChar2xPacked alpha_thin_film_hue_dielectric_priority;\n};\n\nstruct DevicePackedTexturedMaterial : public DevicePackedEffectiveMaterial\n{\n    enum NormalMapEmissionIndices : unsigned char\n    {\n        NORMAL_MAP_INDEX = 0,\n        EMISSION_INDEX = 1,\n    };\n\n    enum BaseColorRoughnessMetallicIndices : unsigned char\n    {\n        BASE_COLOR_INDEX = 0,\n        ROUGHNESS_METALLIC_INDEX = 1,\n    };\n\n    enum RoughnessAndMetallicIndices : unsigned char\n    {\n        ROUGHNESS_INDEX = 0,\n        METALLIC_INDEX = 1,\n    };\n\n    enum AnisotropicSpecularIndices : unsigned char\n    {\n        ANISOTROPIC_INDEX = 0,\n        SPECULAR_INDEX = 1,\n    };\n\n    enum CoatSheenIndices : unsigned char\n    {\n        COAT_INDEX = 0,\n        SHEEN_INDEX = 1,\n    };\n\n    enum SpecularTransmissionIndex : unsigned char\n    {\n        SPECULAR_TRANSMISSION_INDEX = 0,\n    };\n\n    HIPRT_HOST_DEVICE DeviceUnpackedTexturedMaterial unpack()\n    {\n  
      DeviceUnpackedTexturedMaterial out;\n\n        out.normal_map_texture_index = this->get_normal_map_texture_index();\n        out.emission_texture_index = this->get_emission_texture_index();\n        out.base_color_texture_index = this->get_base_color_texture_index();\n\n        out.roughness_metallic_texture_index = this->get_roughness_metallic_texture_index();\n        out.roughness_texture_index = this->get_roughness_texture_index();\n        out.metallic_texture_index = this->get_metallic_texture_index();\n        out.anisotropic_texture_index = this->get_anisotropic_texture_index();\n\n        out.specular_texture_index = this->get_specular_texture_index();\n        out.coat_texture_index = this->get_coat_texture_index();\n        out.sheen_texture_index = this->get_sheen_texture_index();\n        out.specular_transmission_texture_index = this->get_specular_transmission_texture_index();\n\n\n\n\n\n\n        out.emissive_texture_used = this->get_emissive_texture_used();\n        if (!out.emissive_texture_used)\n            out.emission = this->get_emission();\n\n        if (out.base_color_texture_index == MaterialConstants::NO_TEXTURE)\n            out.base_color = this->get_base_color();\n\n        out.roughness = this->get_roughness();\n        out.oren_nayar_sigma = this->get_oren_nayar_sigma();\n\n        // Parameters for Adobe 2023 F82-tint model\n        out.metallic = this->get_metallic();\n        if (out.metallic > 0.0f || out.metallic_texture_index != MaterialConstants::NO_TEXTURE || out.roughness_metallic_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            // We only need to unpack all of this if we actually have a metallic lobe\n\n            out.metallic_F90_falloff_exponent = this->get_metallic_F90_falloff_exponent();\n            // F0 is not here as it uses the 'base_color' of the material\n            out.metallic_F82 = this->get_metallic_F82();\n            out.metallic_F90 = this->get_metallic_F90();\n\n            
out.second_roughness_weight = this->get_second_roughness_weight();\n            out.second_roughness = this->get_second_roughness();\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoMetallicEnergyCompensation == KERNEL_OPTION_TRUE\n            out.do_metallic_energy_compensation = this->get_do_metallic_energy_compensation();\n#endif\n        }\n\n        out.anisotropy = this->get_anisotropy();\n        out.anisotropy_rotation = this->get_anisotropy_rotation();\n\n        // Specular intensity\n        out.specular = this->get_specular();\n        if (out.specular > 0.0f || out.specular_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            // Specular tint intensity. \n            // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n            out.specular_tint = this->get_specular_tint();\n            out.specular_color = this->get_specular_color();\n            // Same as coat darkening but for total internal reflection inside the specular layer\n            // that sits on top of the diffuse base\n            //\n            // Disabled by default for artistic \"expectations\"\n            out.specular_darkening = this->get_specular_darkening();\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoSpecularEnergyCompensation == KERNEL_OPTION_TRUE\n            out.do_specular_energy_compensation = this->get_do_specular_energy_compensation();\n#endif\n        }\n\n        out.coat = this->get_coat();\n        if (out.coat > 0.0f || out.coat_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            out.coat_medium_absorption = this->get_coat_medium_absorption();\n            // The coat thickness influences the amount of absorption (given by 'coat_medium_absorption')\n            // that will happen inside the coat\n            out.coat_medium_thickness = this->get_coat_medium_thickness();\n            out.coat_roughness = this->get_coat_roughness();\n 
           // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n            // i.e. the specular/metallic/transmission layers.\n            // \n            // The option is however given here to artistically disable\n            // that behavior by using coat roughening = 0.0f.\n            out.coat_roughening = this->get_coat_roughening();\n            // Because of the total internal reflection that can happen inside the coat layer (i.e.\n            // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n            // clearcoat will appear will increased saturation.\n            out.coat_darkening = this->get_coat_darkening();\n            out.coat_anisotropy = this->get_coat_anisotropy();\n            out.coat_anisotropy_rotation = this->get_coat_anisotropy_rotation();\n            out.coat_ior = this->get_coat_ior();\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoClearcoatEnergyCompensation == KERNEL_OPTION_TRUE\n            out.do_coat_energy_compensation = this->get_do_coat_energy_compensation();\n#endif\n        }\n\n        out.sheen = this->get_sheen(); // Sheen strength\n        if (out.sheen > 0.0f || out.sheen_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            out.sheen_roughness = this->get_sheen_roughness();\n            out.sheen_color = this->get_sheen_color();\n        }\n\n        out.ior = this->get_ior();\n        out.diffuse_transmission = this->get_diffuse_transmission();\n        out.specular_transmission = this->get_specular_transmission();\n\n        if (out.specular_transmission > 0.0f || out.specular_transmission_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            // Specular transmission specific \n            out.dispersion_scale = this->get_dispersion_scale();\n            out.dispersion_abbe_number = this->get_dispersion_abbe_number();\n            out.thin_walled = 
this->get_thin_walled();\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoGlassEnergyCompensation == KERNEL_OPTION_TRUE\n            out.do_glass_energy_compensation = this->get_do_glass_energy_compensation();\n#endif\n        }\n        if (out.specular_transmission > 0.0f || out.diffuse_transmission > 0.0f || out.specular_transmission_texture_index != MaterialConstants::NO_TEXTURE)\n        {\n            // Also enabled by diffuse transmission as well as specular transmission\n            \n            // At what distance is the light absorbed to the given absorption_color\n            out.absorption_at_distance = this->get_absorption_at_distance();\n            // Color of the light absorption when traveling through the medium\n            out.absorption_color = this->get_absorption_color();\n        }\n\n        out.thin_film = this->get_thin_film();\n        if (out.thin_film > 0.0f)\n        {\n            out.thin_film_ior = this->get_thin_film_ior();\n            out.thin_film_thickness = this->get_thin_film_thickness();\n            out.thin_film_kappa_3 = this->get_thin_film_kappa_3();\n            // Sending the hue film in [0, 1] to the GPU\n            out.thin_film_hue_shift_degrees = this->get_thin_film_hue_shift_degrees();\n            out.thin_film_base_ior_override = this->get_thin_film_base_ior_override();\n            out.thin_film_do_ior_override = this->get_thin_film_do_ior_override();\n        }\n\n        // 1.0f makes the material completely opaque\n        // 0.0f completely transparent (becomes invisible)\n        out.alpha_opacity = this->get_alpha_opacity();\n\n        // Nested dielectric parameter\n        out.set_dielectric_priority(this->get_dielectric_priority());\n\n        // If true, 'energy_preservation_monte_carlo_samples' will be used\n        // to compute the directional albedo of this material.\n        // This computed directional albedo is then used to ensure perfect energy 
conservation\n        // and preservation. \n        // \n        // This is however very expensive.\n        // This is usually only needed on clearcoated materials (but even then, the energy loss due to the absence of multiple scattering between\n        // the clearcoat layer and the BSDF below may be acceptable).\n        // \n        // Non-clearcoated materials can already ensure perfect (modulo implementation quality) energy \n        // conservation/preservation with the precomputed LUTs [Turquin, 2019]. \n        // \n        // See PrincipledBSDFDoEnergyCompensation in this codebase.\n        out.enforce_strong_energy_conservation = this->get_enforce_strong_energy_conservation();\n        if (out.enforce_strong_energy_conservation)\n            out.energy_preservation_monte_carlo_samples = this->get_energy_preservation_monte_carlo_samples();\n\n        return out;\n    }\n\n    HIPRT_HOST_DEVICE unsigned short int get_normal_map_texture_index() const { return normal_map_emission_index.get_value<NormalMapEmissionIndices::NORMAL_MAP_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_emission_texture_index() const { return normal_map_emission_index.get_value<NormalMapEmissionIndices::EMISSION_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_base_color_texture_index() const { return base_color_roughness_metallic_index.get_value<BaseColorRoughnessMetallicIndices::BASE_COLOR_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_roughness_metallic_texture_index() const { return base_color_roughness_metallic_index.get_value<BaseColorRoughnessMetallicIndices::ROUGHNESS_METALLIC_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_roughness_texture_index() const { return roughness_and_metallic_index.get_value<RoughnessAndMetallicIndices::ROUGHNESS_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_metallic_texture_index() const { return roughness_and_metallic_index.get_value<RoughnessAndMetallicIndices::METALLIC_INDEX>(); }\n    
HIPRT_HOST_DEVICE unsigned short int get_anisotropic_texture_index() const { return anisotropic_specular_index.get_value<AnisotropicSpecularIndices::ANISOTROPIC_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_specular_texture_index() const { return anisotropic_specular_index.get_value<AnisotropicSpecularIndices::SPECULAR_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_coat_texture_index() const { return coat_sheen_index.get_value<CoatSheenIndices::COAT_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_sheen_texture_index() const { return coat_sheen_index.get_value<CoatSheenIndices::SHEEN_INDEX>(); }\n    HIPRT_HOST_DEVICE unsigned short int get_specular_transmission_texture_index() const { return specular_transmission_index.get_value<SpecularTransmissionIndex::SPECULAR_TRANSMISSION_INDEX>(); }\n\n    HIPRT_HOST_DEVICE void set_normal_map_texture_index(unsigned short normal_map_index) { normal_map_emission_index.set_value<NormalMapEmissionIndices::NORMAL_MAP_INDEX>(normal_map_index); }\n    HIPRT_HOST_DEVICE void set_emission_texture_index(unsigned short emission_index) { normal_map_emission_index.set_value<NormalMapEmissionIndices::EMISSION_INDEX>(emission_index); }\n    HIPRT_HOST_DEVICE void set_base_color_texture_index(unsigned short base_color_index) { base_color_roughness_metallic_index.set_value<BaseColorRoughnessMetallicIndices::BASE_COLOR_INDEX>(base_color_index); }\n    HIPRT_HOST_DEVICE void set_roughness_metallic_texture_index(unsigned short roughness_metallic_index) { base_color_roughness_metallic_index.set_value<BaseColorRoughnessMetallicIndices::ROUGHNESS_METALLIC_INDEX>(roughness_metallic_index); }\n    HIPRT_HOST_DEVICE void set_roughness_texture_index(unsigned short roughness_index) { roughness_and_metallic_index.set_value<RoughnessAndMetallicIndices::ROUGHNESS_INDEX>(roughness_index); }\n    HIPRT_HOST_DEVICE void set_metallic_texture_index(unsigned short metallic_index) { 
roughness_and_metallic_index.set_value<RoughnessAndMetallicIndices::METALLIC_INDEX>(metallic_index); }\n    HIPRT_HOST_DEVICE void set_anisotropic_texture_index(unsigned short anisotropic_index) { anisotropic_specular_index.set_value<AnisotropicSpecularIndices::ANISOTROPIC_INDEX>(anisotropic_index); }\n    HIPRT_HOST_DEVICE void set_specular_texture_index(unsigned short specular_index) { anisotropic_specular_index.set_value<AnisotropicSpecularIndices::SPECULAR_INDEX>(specular_index); }\n    HIPRT_HOST_DEVICE void set_coat_texture_index(unsigned short coat_index) { coat_sheen_index.set_value<CoatSheenIndices::COAT_INDEX>(coat_index); }\n    HIPRT_HOST_DEVICE void set_sheen_texture_index(unsigned short sheen_index) { coat_sheen_index.set_value<CoatSheenIndices::SHEEN_INDEX>(sheen_index); }\n    HIPRT_HOST_DEVICE void set_specular_transmission_texture_index(unsigned short _specular_transmission_index) { specular_transmission_index.set_value<SpecularTransmissionIndex::SPECULAR_TRANSMISSION_INDEX>(_specular_transmission_index); }\n\nprivate:\n    friend class DevicePackedTexturedMaterialSoAGPUData;\n    friend class DevicePackedTexturedMaterialSoACPUData;\n\n    Uint2xPacked normal_map_emission_index;\n    // If the roughness_metallic texture index is not MaterialConstants::NO_TEXTURE, \n    // then there is only one texture for the metallic and the roughness parameters in which.\n    // case the green channel is the roughness and the blue channel is the metalness.\n    Uint2xPacked base_color_roughness_metallic_index;\n    Uint2xPacked roughness_and_metallic_index;\n    Uint2xPacked anisotropic_specular_index;\n    Uint2xPacked coat_sheen_index;\n    // TODO: 1 PACKED UINT IS UNUSED IN HERE\n    Uint2xPacked specular_transmission_index;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialPackedSoA.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_H\n#define HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n#include \"HostDeviceCommon/Material/MaterialUtils.h\"\n\nstruct DevicePackedEffectiveMaterialSoA\n{\n    HIPRT_DEVICE ColorRGB32F get_emission(int material_index) const { return this->emission[material_index]; }\n    HIPRT_DEVICE bool get_emissive_texture_used(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::PACKED_EMISSIVE_TEXTURE_USED>(); }\n\n    HIPRT_DEVICE ColorRGB32F get_base_color(int material_index) const { return base_color_roughness[material_index].get_color(); }\n\n    HIPRT_DEVICE float get_roughness(int material_index) const { return base_color_roughness[material_index].get_float(); }\n    HIPRT_DEVICE float get_oren_nayar_sigma(int material_index) const { return this->oren_nayar_sigma[material_index]; }\n\n    HIPRT_DEVICE float get_metallic(int material_index) const { return metallic_F90_and_metallic[material_index].get_float(); }\n    HIPRT_DEVICE float get_metallic_F90_falloff_exponent(int material_index) const { return this->metallic_F90_falloff_exponent[material_index]; }\n    HIPRT_DEVICE ColorRGB32F get_metallic_F82(int material_index) const { return metallic_F82_packed_and_diffuse_transmission[material_index].get_color(); }\n    HIPRT_DEVICE ColorRGB32F get_metallic_F90(int material_index) const { return metallic_F90_and_metallic[material_index].get_color(); }\n    HIPRT_DEVICE float get_anisotropy(int material_index) const { return anisotropy_and_rotation_and_second_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedAnisotropyGroupIndices::PACKED_ANISOTROPY>(); }\n    HIPRT_DEVICE float get_anisotropy_rotation(int 
material_index) const { return anisotropy_and_rotation_and_second_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedAnisotropyGroupIndices::PACKED_ANISOTROPY_ROTATION>(); }\n    HIPRT_DEVICE float get_second_roughness_weight(int material_index) const { return anisotropy_and_rotation_and_second_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS_WEIGHT>(); }\n    HIPRT_DEVICE float get_second_roughness(int material_index) const { return anisotropy_and_rotation_and_second_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedAnisotropyGroupIndices::PACKED_SECOND_ROUGHNESS>(); }\n    HIPRT_DEVICE bool get_do_metallic_energy_compensation(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::METALLIC_ENERGY_COMPENSATION>(); }\n\n    HIPRT_DEVICE float get_specular(int material_index) const { return specular_and_darkening_and_coat_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedSpecularGroupIndices::PACKED_SPECULAR>(); }\n    HIPRT_DEVICE float get_specular_tint(int material_index) const { return specular_color_and_tint_factor[material_index].get_float(); }\n    HIPRT_DEVICE ColorRGB32F get_specular_color(int material_index) const { return specular_color_and_tint_factor[material_index].get_color(); }\n    HIPRT_DEVICE float get_specular_darkening(int material_index) const { return specular_and_darkening_and_coat_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedSpecularGroupIndices::PACKED_SPECULAR_DARKENING>(); }\n    HIPRT_DEVICE bool get_do_specular_energy_compensation(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::SPECULAR_ENERGY_COMPENSATION>(); }\n\n    HIPRT_DEVICE float get_coat(int material_index) const { return coat_and_medium_absorption[material_index].get_float(); }\n    
HIPRT_DEVICE ColorRGB32F get_coat_medium_absorption(int material_index) const { return coat_and_medium_absorption[material_index].get_color(); }\n    HIPRT_DEVICE float get_coat_medium_thickness(int material_index) const { return this->coat_medium_thickness[material_index]; }\n    HIPRT_DEVICE float get_coat_roughness(int material_index) const { return specular_and_darkening_and_coat_roughness[material_index].get_float<DevicePackedEffectiveMaterial::PackedSpecularGroupIndices::PACKED_COAT_ROUGHNESS>(); }\n    HIPRT_DEVICE float get_coat_roughening(int material_index) const { return coat_roughening_darkening_anisotropy_and_rotation[material_index].get_float<DevicePackedEffectiveMaterial::PackedCoatGroupIndices::PACKED_COAT_ROUGHENING>(); }\n    HIPRT_DEVICE float get_coat_darkening(int material_index) const { return coat_roughening_darkening_anisotropy_and_rotation[material_index].get_float<DevicePackedEffectiveMaterial::PackedCoatGroupIndices::PACKED_COAT_DARKENING>(); }\n    HIPRT_DEVICE float get_coat_anisotropy(int material_index) const { return coat_roughening_darkening_anisotropy_and_rotation[material_index].get_float<DevicePackedEffectiveMaterial::PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY>(); }\n    HIPRT_DEVICE float get_coat_anisotropy_rotation(int material_index) const { return coat_roughening_darkening_anisotropy_and_rotation[material_index].get_float<DevicePackedEffectiveMaterial::PackedCoatGroupIndices::PACKED_COAT_ANISOTROPY_ROTATION>(); }\n    HIPRT_DEVICE float get_coat_ior(int material_index) const { return this->coat_ior[material_index]; }\n    HIPRT_DEVICE bool get_do_coat_energy_compensation(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::CLEARCOAT_ENERGY_COMPENSATION>(); }\n\n    HIPRT_DEVICE float get_sheen(int material_index) const { return sheen_and_color[material_index].get_float(); }\n    HIPRT_DEVICE float get_sheen_roughness(int material_index) const { return 
sheen_roughness_transmission_dispersion_thin_film[material_index].get_float<DevicePackedEffectiveMaterial::PackedSheenRoughnessGroupIndices::PACKED_SHEEN_ROUGHNESS>(); }\n    HIPRT_DEVICE ColorRGB32F get_sheen_color(int material_index) const { return sheen_and_color[material_index].get_color(); }\n\n    HIPRT_DEVICE float get_ior(int material_index) const { return this->ior[material_index]; }\n    HIPRT_DEVICE float get_specular_transmission(int material_index) const { return sheen_roughness_transmission_dispersion_thin_film[material_index].get_float<DevicePackedEffectiveMaterial::PackedSheenRoughnessGroupIndices::PACKED_SPECULAR_TRANSMISSION>(); }\n    HIPRT_DEVICE float get_diffuse_transmission(int material_index) const { return metallic_F82_packed_and_diffuse_transmission[material_index].get_float(); }\n    HIPRT_DEVICE float get_absorption_at_distance(int material_index) const { return this->absorption_at_distance[material_index]; }\n    HIPRT_DEVICE ColorRGB32F get_absorption_color(int material_index) const { return absorption_color_packed[material_index].get_color(); }\n    HIPRT_DEVICE float get_dispersion_scale(int material_index) const { return sheen_roughness_transmission_dispersion_thin_film[material_index].get_float<DevicePackedEffectiveMaterial::PackedSheenRoughnessGroupIndices::PACKED_DISPERSION_SCALE>(); }\n    HIPRT_DEVICE float get_dispersion_abbe_number(int material_index) const { return this->dispersion_abbe_number[material_index]; }\n    HIPRT_DEVICE bool get_thin_walled(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::PACKED_THIN_WALLED >(); }\n    HIPRT_DEVICE bool get_do_glass_energy_compensation(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::GLASS_ENERGY_COMPENSATION>(); }\n\n    HIPRT_DEVICE float get_thin_film(int material_index) const { return 
sheen_roughness_transmission_dispersion_thin_film[material_index].get_float<DevicePackedEffectiveMaterial::PackedSheenRoughnessGroupIndices::PACKED_THIN_FILM>(); }\n    HIPRT_DEVICE float get_thin_film_ior(int material_index) const { return this->thin_film_ior[material_index]; }\n    HIPRT_DEVICE float get_thin_film_thickness(int material_index) const { return this->thin_film_thickness[material_index]; }\n    HIPRT_DEVICE float get_thin_film_kappa_3(int material_index) const { return this->thin_film_kappa_3[material_index]; }\n    HIPRT_DEVICE float get_thin_film_hue_shift_degrees(int material_index) const { return alpha_thin_film_hue_dielectric_priority[material_index].get_float<DevicePackedEffectiveMaterial::PackedAlphaOpacityGroupIndices::PACKED_THIN_FILM_HUE_SHIFT>(); }\n    HIPRT_DEVICE float get_thin_film_base_ior_override(int material_index) const { return this->thin_film_base_ior_override[material_index]; }\n    HIPRT_DEVICE bool get_thin_film_do_ior_override(int material_index) const { return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::PACKED_THIN_FILM_DO_IOR_OVERRIDE>(); }\n\n    HIPRT_DEVICE float get_alpha_opacity(int material_index) const { return alpha_thin_film_hue_dielectric_priority[material_index].get_float<DevicePackedEffectiveMaterial::PackedAlphaOpacityGroupIndices::PACKED_ALPHA_OPACITY>(); }\n    HIPRT_DEVICE unsigned char get_dielectric_priority(int material_index) const { return alpha_thin_film_hue_dielectric_priority[material_index].get_uchar<DevicePackedEffectiveMaterial::PackedAlphaOpacityGroupIndices::PACKED_DIELECTRIC_PRIORITY>(); }\n\n    HIPRT_DEVICE unsigned char get_energy_preservation_monte_carlo_samples(int material_index) const { return alpha_thin_film_hue_dielectric_priority[material_index].get_uchar<DevicePackedEffectiveMaterial::PackedAlphaOpacityGroupIndices::PACKED_ENERGY_PRESERVATION_SAMPLES>(); }\n    HIPRT_DEVICE bool get_enforce_strong_energy_conservation(int material_index) const { 
return flags[material_index].get_bool<DevicePackedEffectiveMaterial::PackedFlagsIndices::PACKED_ENFORCE_STRONG_ENERGY_CONSERVATION>(); }\n\n    // Packed flags of the material:\n    //  - thin_walled\n    //      Is the material thin walled? i.e. it doesn't have an interior and light doesn't\n    //      bend as it goes through\n    // \n    //  - emissive_texture_used\n    //      Does the material use an emissive texture?\n    // \n    //  - thin_film_do_ior_override\n    //      Whether or not to override the IORs used for the base material on top of which\n    //      the thin film sits.\n    // \n    //  - enforce_strong_energy_conservation\n    //      If true, 'energy_preservation_monte_carlo_samples' will be used\n    //      to compute the directional albedo of this material.\n    //      This computed directional albedo is then used to ensure perfect energy conservation\n    //      and preservation. \n    // \n    //      This is however very expensive.\n    //      This is usually only needed on clearcoated materials (but even then, the energy loss due to the absence of multiple scattering between\n    //      the clearcoat layer and the BSDF below may be acceptable).\n    // \n    //      Non-clearcoated materials can already ensure perfect (modulo implementation quality) energy \n    //      conservation/preservation with the precomputed LUTs [Turquin, 2019]. 
\n    // \n    //      See PrincipledBSDFDoEnergyCompensation in this codebase.\n    //\n    //      Values from the 'PackedFlagsIndices' enum should be used\n    //      to retrieve/set from the packed flags\n    UChar8BoolsPacked* flags = nullptr;\n\n    // Full range emission\n    ColorRGB32F* emission = nullptr;\n\n    // Base color RGB 3x8 bits + roughness uchar [float in [0,1] packed in 8 bit]\n    ColorRGB24bFloat0_1Packed* base_color_roughness = nullptr;\n\n    float* oren_nayar_sigma = nullptr;\n\n    // Parameters for Adobe 2023 F82-tint model\n    // Packs the SDR F90 color and the metalness parameter\n    ColorRGB24bFloat0_1Packed* metallic_F90_and_metallic;\n    ColorRGB24bFloat0_1Packed* metallic_F82_packed_and_diffuse_transmission;\n    float* metallic_F90_falloff_exponent = nullptr;\n\n    Float4xPacked* anisotropy_and_rotation_and_second_roughness = nullptr;\n\n    // Packed specular color and the intensity of the tint\n    // \n    // Specular tint intensity: Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n    ColorRGB24bFloat0_1Packed* specular_color_and_tint_factor = nullptr;\n\n    // Packed:\n    //  - specular_darkening\n    //      Same as coat darkening but for total internal reflection inside the specular layer\n    //      that sits on top of the diffuse base\n    //\n    //      Disabled by default for artistic \"expectations\"\n    //\n    //  - Specular\n    //      Specular intensity\n    //\n    //  - Coat roughness\n    //      Roughness of the coat \n    // TODO: PACKED 1 FLOAT IS UNUSED IN HERE\n    Float4xPacked* specular_and_darkening_and_coat_roughness = nullptr;\n    float* coat_medium_thickness = nullptr;\n\n    // Packed:\n    //  - Coat\n    //      Intensity of the coat. 
0.0f disables the coating\n    //\n    //  - Coat medium absorption color \n    ColorRGB24bFloat0_1Packed* coat_and_medium_absorption = nullptr;\n\n    // Packed:\n    //  - Coat roughening\n    //      Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n    //      i.e. the specular/metallic/transmission layers.\n    // \n    //      The option is however given here to artistically disable\n    //      that behavior by using coat roughening = 0.0f.\n    //\n    //  - Coat darkening\n    //      Because of the total internal reflection that can happen inside the coat layer (i.e.\n    //      light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n    //      clearcoat will appear will increased saturation.\n    //     \n    //  - Coat anisotropy\n    //  - Coat anisotropy rotation\n    Float4xPacked* coat_roughening_darkening_anisotropy_and_rotation = nullptr;\n    float* coat_ior = nullptr;\n\n    // Packed:\n    //  - Sheen intensity. 0.0f disables the sheen effect\n    //\n    //  - Sheen color\n    ColorRGB24bFloat0_1Packed* sheen_and_color = nullptr;\n\n    // IOR of the base material\n    float* ior = nullptr;\n\n    // Packed:\n    //  - Absorption color\n    //      Color of the light absorption when traveling through the medium\n    // TODO: PACKED FLOAT IS UNUSED IN HERE\n    ColorRGB24bFloat0_1Packed* absorption_color_packed = nullptr;\n    float* absorption_at_distance = nullptr;\n\n    // Packed:\n    //  - Sheen roughness\n    // \n    //  - Specular transmission\n    //      How much light is transmitted through the material. 
This essentially controls the glass lobe\n    //\n    //  - Dispersion scale\n    //      Intensity of the dispersion effect in glass objects\n    //\n    //  - Thin film\n    //      Intensity of the thin-film effect\n    Float4xPacked* sheen_roughness_transmission_dispersion_thin_film = nullptr;\n\n    float* dispersion_abbe_number = nullptr;\n    float* thin_film_ior = nullptr;\n    float* thin_film_thickness = nullptr;\n    float* thin_film_kappa_3 = nullptr;\n    float* thin_film_base_ior_override = nullptr;\n\n    // Packed:\n    //  - Alpha opacity\n    //      1.0f makes the material completely opaque\n    //      0.0f completely transparent (becomes invisible)\n    //\n    //  - Thin film hue shift in degrees\n    // \n    //  - Dielectric priority\n    //      Nested dielectric with priority parameter\n    //\n    //  - Energy preservation samples\n    //      How many samples will be computed for the integration of the directional\n    //      when the strong energy preservation/conservation of the material is enabled\n    Float2xUChar2xPacked* alpha_thin_film_hue_dielectric_priority = nullptr;\n};\n\nstruct DevicePackedTexturedMaterialSoA : public DevicePackedEffectiveMaterialSoA\n{\n    HIPRT_DEVICE unsigned short int get_normal_map_texture_index(int material_index) const { return normal_map_emission_index[material_index].get_value<DevicePackedTexturedMaterial::NormalMapEmissionIndices::NORMAL_MAP_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_emission_texture_index(int material_index) const { return normal_map_emission_index[material_index].get_value<DevicePackedTexturedMaterial::NormalMapEmissionIndices::EMISSION_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_base_color_texture_index(int material_index) const { return base_color_roughness_metallic_index[material_index].get_value<DevicePackedTexturedMaterial::BaseColorRoughnessMetallicIndices::BASE_COLOR_INDEX>(); }\n    HIPRT_DEVICE unsigned short int 
get_roughness_metallic_texture_index(int material_index) const { return base_color_roughness_metallic_index[material_index].get_value<DevicePackedTexturedMaterial::BaseColorRoughnessMetallicIndices::ROUGHNESS_METALLIC_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_roughness_texture_index(int material_index) const { return roughness_and_metallic_index[material_index].get_value<DevicePackedTexturedMaterial::RoughnessAndMetallicIndices::ROUGHNESS_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_metallic_texture_index(int material_index) const { return roughness_and_metallic_index[material_index].get_value<DevicePackedTexturedMaterial::RoughnessAndMetallicIndices::METALLIC_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_anisotropic_texture_index(int material_index) const { return roughness_and_metallic_index[material_index].get_value<DevicePackedTexturedMaterial::AnisotropicSpecularIndices::ANISOTROPIC_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_specular_texture_index(int material_index) const { return anisotropic_specular_index[material_index].get_value<DevicePackedTexturedMaterial::AnisotropicSpecularIndices::SPECULAR_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_coat_texture_index(int material_index) const { return coat_sheen_index[material_index].get_value<DevicePackedTexturedMaterial::CoatSheenIndices::COAT_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_sheen_texture_index(int material_index) const { return coat_sheen_index[material_index].get_value<DevicePackedTexturedMaterial::CoatSheenIndices::SHEEN_INDEX>(); }\n    HIPRT_DEVICE unsigned short int get_specular_transmission_texture_index(int material_index) const { return specular_transmission_index[material_index].get_value<DevicePackedTexturedMaterial::SpecularTransmissionIndex::SPECULAR_TRANSMISSION_INDEX>(); }\n\n    /**\n     * The 'DevicePackedTexturedMaterial' returned contains all the data read \n     * in all the arrays the SoA material struct for the given 
'material_index'\n     * \n     * Note that, for example, even if the material has its 'coat' parameter at 0.0f\n     * (i.e. it doesn't use coat at all), all the coat parameters (coat thickness, roughness, anisotropy, .....) \n     * will be loaded from global memory and this will be useless global memory accesses\n     * \n     * A more performant alternative of this function is 'read_partial_material(int material_index)'\n     * \n     * This function is fully commented out because it's actually never used\n     */\n    //HIPRT_DEVICE DevicePackedTexturedMaterial read_full_textured_material(int material_index) const\n    //{\n    //    DevicePackedTexturedMaterial out;\n\n    //    out.set_normal_map_texture_index(this->get_normal_map_texture_index(material_index));\n    //    out.set_emission_texture_index(this->get_emission_texture_index(material_index));\n    //    out.set_base_color_texture_index(this->get_base_color_texture_index(material_index));\n\n    //    out.set_roughness_metallic_texture_index(this->get_roughness_metallic_texture_index(material_index));\n    //    out.set_roughness_texture_index(this->get_roughness_texture_index(material_index));\n    //    out.set_metallic_texture_index(this->get_metallic_texture_index(material_index));\n    //    out.set_anisotropic_texture_index(this->get_anisotropic_texture_index(material_index));\n\n    //    out.set_specular_texture_index(this->get_specular_texture_index(material_index));\n    //    out.set_coat_texture_index(this->get_coat_texture_index(material_index));\n    //    out.set_sheen_texture_index(this->get_sheen_texture_index(material_index));\n    //    out.set_specular_transmission_texture_index(this->get_specular_transmission_texture_index(material_index));\n\n\n\n\n\n\n    //    out.set_emission(this->get_emission(material_index));\n    //    out.set_emissive_texture_used(this->get_emissive_texture_used(material_index));\n\n    //    
out.set_base_color(this->get_base_color(material_index));\n\n    //    out.set_roughness(this->get_roughness(material_index));\n    //    out.set_oren_nayar_sigma(this->get_oren_nayar_sigma(material_index));\n\n    //    // Parameters for Adobe 2023 F82-tint model\n    //    out.set_metallic(this->get_metallic(material_index));\n    //    out.set_metallic_F90_falloff_exponent(this->get_metallic_F90_falloff_exponent(material_index));\n    //    // F0 is not here as it uses the 'base_color' of the material\n    //    out.set_metallic_F82(this->get_metallic_F82(material_index));\n    //    out.set_metallic_F90(this->get_metallic_F90(material_index));\n    //    out.set_anisotropy(this->get_anisotropy(material_index));\n    //    out.set_anisotropy_rotation(this->get_anisotropy_rotation(material_index));\n    //    out.set_second_roughness_weight(this->get_second_roughness_weight(material_index));\n    //    out.set_second_roughness(this->get_second_roughness(material_index));\n    //    out.set_metallic_energy_compensation(this->get_do_metallic_energy_compensation(material_index));\n\n    //    // Specular intensity\n    //    out.set_specular(this->get_specular(material_index));\n    //    // Specular tint intensity. 
\n    //    // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n    //    out.set_specular_tint(this->get_specular_tint(material_index));\n    //    out.set_specular_color(this->get_specular_color(material_index));\n    //    // Same as coat darkening but for total internal reflection inside the specular layer\n    //    // that sits on top of the diffuse base\n    //    //\n    //    // Disabled by default for artistic \"expectations\"\n    //    out.set_specular_darkening(this->get_specular_darkening(material_index));\n    //    out.set_specular_energy_compensation(this->get_do_specular_energy_compensation(material_index));\n\n    //    out.set_coat(this->get_coat(material_index));\n    //    out.set_coat_medium_absorption(this->get_coat_medium_absorption(material_index));\n    //    // The coat thickness influences the amount of absorption (given by 'coat_medium_absorption')\n    //    // that will happen inside the coat\n    //    out.set_coat_medium_thickness(this->get_coat_medium_thickness(material_index));\n    //    out.set_coat_roughness(this->get_coat_roughness(material_index));\n    //    // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n    //    // i.e. 
the specular/metallic/transmission layers.\n    //    // \n    //    // The option is however given here to artistically disable\n    //    // that behavior by using coat roughening = 0.0f.\n    //    out.set_coat_roughening(this->get_coat_roughening(material_index));\n    //    // Because of the total internal reflection that can happen inside the coat layer (i.e.\n    //    // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n    //    // clearcoat will appear will increased saturation.\n    //    out.set_coat_darkening(this->get_coat_darkening(material_index));\n    //    out.set_coat_anisotropy(this->get_coat_anisotropy(material_index));\n    //    out.set_coat_anisotropy_rotation(this->get_coat_anisotropy_rotation(material_index));\n    //    out.set_coat_ior(this->get_coat_ior(material_index));\n    //    out.set_coat_energy_compensation(this->get_do_coat_energy_compensation(material_index));\n\n    //    out.set_sheen(this->get_sheen(material_index)); // Sheen strength\n    //    out.set_sheen_roughness(this->get_sheen_roughness(material_index));\n    //    out.set_sheen_color(this->get_sheen_color(material_index));\n\n    //    out.set_ior(this->get_ior(material_index));\n    //    out.set_specular_transmission(this->get_specular_transmission(material_index));\n\n    //    // At what distance is the light absorbed to the given absorption_color\n    //    out.set_absorption_at_distance(this->get_absorption_at_distance(material_index));\n    //    // Color of the light absorption when traveling through the medium\n    //    out.set_absorption_color(this->get_absorption_color(material_index));\n    //    out.set_dispersion_scale(this->get_dispersion_scale(material_index));\n    //    out.set_dispersion_abbe_number(this->get_dispersion_abbe_number(material_index));\n    //    out.set_thin_walled(this->get_thin_walled(material_index));\n    //    
out.set_glass_energy_compensation(this->get_do_glass_energy_compensation(material_index));\n\n    //    out.set_thin_film(this->get_thin_film(material_index));\n    //    out.set_thin_film_ior(this->get_thin_film_ior(material_index));\n    //    out.set_thin_film_thickness(this->get_thin_film_thickness(material_index));\n    //    out.set_thin_film_kappa_3(this->get_thin_film_kappa_3(material_index));\n    //    // Sending the hue film in [0, 1] to the GPU\n    //    out.set_thin_film_hue_shift_degrees(get_thin_film_hue_shift_degrees(material_index));\n    //    out.set_thin_film_base_ior_override(this->get_thin_film_base_ior_override(material_index));\n    //    out.set_thin_film_do_ior_override(this->get_thin_film_do_ior_override(material_index));\n\n    //    // 1.0f makes the material completely opaque\n    //    // 0.0f completely transparent (becomes invisible)\n    //    out.set_alpha_opacity(this->get_alpha_opacity(material_index));\n\n    //    // Nested dielectric parameter\n    //    out.set_dielectric_priority(this->get_dielectric_priority(material_index));\n\n    //    out.set_energy_preservation_monte_carlo_samples(this->get_energy_preservation_monte_carlo_samples(material_index));\n    //    // If true, 'energy_preservation_monte_carlo_samples' will be used\n    //    // to compute the directional albedo of this material.\n    //    // This computed directional albedo is then used to ensure perfect energy conservation\n    //    // and preservation. \n    //    // \n    //    // This is however very expensive.\n    //    // This is usually only needed on clearcoated materials (but even then, the energy loss due to the absence of multiple scattering between\n    //    // the clearcoat layer and the BSDF below may be acceptable).\n    //    // \n    //    // Non-clearcoated materials can already ensure perfect (modulo implementation quality) energy \n    //    // conservation/preservation with the precomputed LUTs [Turquin, 2019]. 
\n    //    // \n    //    // See PrincipledBSDFDoEnergyCompensation in this codebase.\n    //    out.set_enforce_strong_energy_conservation(this->get_enforce_strong_energy_conservation(material_index));\n\n    //    return out;\n    //}\n\n    /**\n     * Only reads the relevant parameters of the material based on what parameters this material is using.\n     * For example, if the 'coat' parameter of the material is 0.0f (i.e. the coat isn't used), none of the\n     * coat parameters will be read from global memory which saves on memory traffic\n     */\n    HIPRT_DEVICE DevicePackedTexturedMaterial read_partial_material(int material_index) const\n    {\n        DevicePackedTexturedMaterial out;\n\n        out.set_normal_map_texture_index(this->get_normal_map_texture_index(material_index));\n        out.set_emission_texture_index(this->get_emission_texture_index(material_index));\n        out.set_base_color_texture_index(this->get_base_color_texture_index(material_index));\n\n        out.set_roughness_metallic_texture_index(this->get_roughness_metallic_texture_index(material_index));\n        out.set_roughness_texture_index(this->get_roughness_texture_index(material_index));\n        out.set_metallic_texture_index(this->get_metallic_texture_index(material_index));\n        out.set_anisotropic_texture_index(this->get_anisotropic_texture_index(material_index));\n\n        out.set_specular_texture_index(this->get_specular_texture_index(material_index));\n        out.set_coat_texture_index(this->get_coat_texture_index(material_index));\n        out.set_sheen_texture_index(this->get_sheen_texture_index(material_index));\n        out.set_specular_transmission_texture_index(this->get_specular_transmission_texture_index(material_index));\n\n\n\n\n\n        out.set_emissive_texture_used(this->get_emissive_texture_used(material_index));\n        if (!out.get_emissive_texture_used())\n            // Only loading the emission if no emissive texture is used\n            
out.set_emission(this->get_emission(material_index));\n\n        if (out.get_base_color_texture_index() == MaterialConstants::NO_TEXTURE)\n            // Only reading the base color if no base color texture is used\n            // (because if we have a base color texture, it's going to override\n            // the base color parameter anyway)\n            out.set_base_color(this->get_base_color(material_index));\n\n        if (out.get_roughness_texture_index() == MaterialConstants::NO_TEXTURE && out.get_roughness_metallic_texture_index() == MaterialConstants::NO_TEXTURE)\n            // Same for the roughness\n            out.set_roughness(this->get_roughness(material_index));\n\n        out.set_oren_nayar_sigma(this->get_oren_nayar_sigma(material_index));\n\n        // Parameters for Adobe 2023 F82-tint model\n        // Only reading the metallic if no metallic texture is used\n        // (because if we have a metallic texture, it's going to override\n        // the metallic parameter anyway)\n        out.set_metallic(this->get_metallic(material_index));\n        if (out.get_metallic() > 0.0f || out.get_metallic_texture_index() != MaterialConstants::NO_TEXTURE || out.get_roughness_metallic_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            // If the metallic parameter isn't 0.0f, i.e. 
the material does have a metallic lobe,\n            // then and only then do we need to load the metallic parameters\n\n            out.set_metallic_F90_falloff_exponent(this->get_metallic_F90_falloff_exponent(material_index));\n            // F0 is not here as it uses the 'base_color' of the material\n            out.set_metallic_F82(this->get_metallic_F82(material_index));\n            out.set_metallic_F90(this->get_metallic_F90(material_index));\n\n            out.set_second_roughness_weight(this->get_second_roughness_weight(material_index));\n            out.set_second_roughness(this->get_second_roughness(material_index));\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoMetallicEnergyCompensation == KERNEL_OPTION_TRUE\n            out.set_metallic_energy_compensation(this->get_do_metallic_energy_compensation(material_index));\n#endif\n        }\n\n        if(out.get_anisotropic_texture_index() == MaterialConstants::NO_TEXTURE)\n            out.set_anisotropy(this->get_anisotropy(material_index));\n        out.set_anisotropy_rotation(this->get_anisotropy_rotation(material_index));\n\n        // Specular intensity\n        out.set_specular(this->get_specular(material_index));\n        if (out.get_specular() > 0.0f || out.get_specular_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            // We only need to read the various specular parameters if the material actually has a specular lobe\n\n            // Specular tint intensity. 
\n            out.set_specular_tint(this->get_specular_tint(material_index));\n            // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n            out.set_specular_color(this->get_specular_color(material_index));\n            // Same as coat darkening but for total internal reflection inside the specular layer\n            // that sits on top of the diffuse base\n            //\n            // Disabled by default for artistic \"expectations\"\n            out.set_specular_darkening(this->get_specular_darkening(material_index));\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoSpecularEnergyCompensation == KERNEL_OPTION_TRUE\n            out.set_specular_energy_compensation(this->get_do_specular_energy_compensation(material_index));\n#endif\n        }\n\n        out.set_coat(this->get_coat(material_index));\n        if (out.get_coat() > 0.0f || out.get_coat_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            // We only need to read the coat parameters if the material has a coat lobe\n            // (which is when out.get_coat() > 0.0f)\n\n            out.set_coat_medium_absorption(this->get_coat_medium_absorption(material_index));\n            // The coat thickness influences the amount of absorption (given by 'coat_medium_absorption')\n            // that will happen inside the coat\n            out.set_coat_medium_thickness(this->get_coat_medium_thickness(material_index));\n            out.set_coat_roughness(this->get_coat_roughness(material_index));\n            // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n            // i.e. 
the specular/metallic/transmission layers.\n            // \n            // The option is however given here to artistically disable\n            // that behavior by using coat roughening = 0.0f.\n            out.set_coat_roughening(this->get_coat_roughening(material_index));\n            // Because of the total internal reflection that can happen inside the coat layer (i.e.\n            // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n            // clearcoat will appear will increased saturation.\n            out.set_coat_darkening(this->get_coat_darkening(material_index));\n            out.set_coat_anisotropy(this->get_coat_anisotropy(material_index));\n            out.set_coat_anisotropy_rotation(this->get_coat_anisotropy_rotation(material_index));\n            out.set_coat_ior(this->get_coat_ior(material_index));\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoClearcoatEnergyCompensation == KERNEL_OPTION_TRUE\n            out.set_coat_energy_compensation(this->get_do_coat_energy_compensation(material_index));\n#endif\n        }\n\n        out.set_sheen(this->get_sheen(material_index)); // Sheen strength\n        if (out.get_sheen() > 0.0f || out.get_sheen_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            out.set_sheen_roughness(this->get_sheen_roughness(material_index));\n            out.set_sheen_color(this->get_sheen_color(material_index));\n        }\n\n        out.set_ior(this->get_ior(material_index));\n\n        out.set_diffuse_transmission(this->get_diffuse_transmission(material_index));\n        out.set_specular_transmission(this->get_specular_transmission(material_index));\n        if (out.get_specular_transmission() > 0.0f || out.get_specular_transmission_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            // This is all specific to specular transmission\n            
out.set_dispersion_scale(this->get_dispersion_scale(material_index));\n            out.set_dispersion_abbe_number(this->get_dispersion_abbe_number(material_index));\n            out.set_thin_walled(this->get_thin_walled(material_index));\n\n#if PrincipledBSDFDoEnergyCompensation == KERNEL_OPTION_TRUE && PrincipledBSDFDoGlassEnergyCompensation == KERNEL_OPTION_TRUE\n            out.set_glass_energy_compensation(this->get_do_glass_energy_compensation(material_index));\n#endif\n        }\n\n        if (out.get_specular_transmission() > 0.0f || out.get_diffuse_transmission() > 0.0f || out.get_specular_transmission_texture_index() != MaterialConstants::NO_TEXTURE)\n        {\n            // This is also applicable to diffuse transmission\n            \n            // At what distance is the light absorbed to the given absorption_color\n            out.set_absorption_at_distance(this->get_absorption_at_distance(material_index));\n            // Color of the light absorption when traveling through the medium\n            out.set_absorption_color(this->get_absorption_color(material_index));\n        }\n\n        out.set_thin_film(this->get_thin_film(material_index));\n        if (out.get_thin_film() > 0.0f)\n        {\n            out.set_thin_film_ior(this->get_thin_film_ior(material_index));\n            out.set_thin_film_thickness(this->get_thin_film_thickness(material_index));\n            out.set_thin_film_kappa_3(this->get_thin_film_kappa_3(material_index));\n            // Sending the hue film in [0, 1] to the GPU\n            out.set_thin_film_hue_shift_degrees(get_thin_film_hue_shift_degrees(material_index));\n            out.set_thin_film_base_ior_override(this->get_thin_film_base_ior_override(material_index));\n            out.set_thin_film_do_ior_override(this->get_thin_film_do_ior_override(material_index));\n        }\n\n        // 1.0f makes the material completely opaque\n        // 0.0f completely transparent (becomes invisible)\n        
out.set_alpha_opacity(this->get_alpha_opacity(material_index));\n\n        // Nested dielectric parameter\n        out.set_dielectric_priority(this->get_dielectric_priority(material_index));\n\n        return out;\n    }\n\n    Uint2xPacked* normal_map_emission_index = nullptr;\n    // If the roughness_metallic texture index is not MaterialConstants::NO_TEXTURE, \n    // then there is only one texture for the metallic and the roughness parameters in which.\n    // case the green channel is the roughness and the blue channel is the metalness.\n    Uint2xPacked* base_color_roughness_metallic_index = nullptr;\n    Uint2xPacked* roughness_and_metallic_index = nullptr;\n    Uint2xPacked* anisotropic_specular_index = nullptr;\n    Uint2xPacked* coat_sheen_index = nullptr;\n    // TODO: 1 PACKED UINT IS UNUSED IN HERE\n    Uint2xPacked* specular_transmission_index = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialUnpacked.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_UNPACKED_H\n#define HOST_DEVICE_COMMON_MATERIAL_UNPACKED_H\n\n//#include \"HostDeviceCommon/Material/MaterialUtils.h\"\n\n/**\n * How to add a material property:\n * \n * 1)   MaterialUnpacked.h\n *\n *      Add the unpacked device material structure what the GPU is going to need in the shaders\n *      For most parameters, this is just the parameters itself\n * \n *      For some other parameters, some stuff can be precomputed on the CPU and the GPU\n *      can then use only the precomputed stuff. In these cases, you need to add the precomputed\n *      stuff in here. The data needed for the precomputation is only going to be stored\n *      in the CPUMaterial, step 2).\n *      \n *      An example of a precomputed parameter is the emission. On the GPU, the emission is a simple\n *      color but on the CPU the emission is a color + emission strength.\n *      The emission strength is precomputed (multiplied/factored in) into the emission when\n *      packed into the material that the GPU uses\n *      \n * 2)   CPUMaterial.h\n *\n *      Add the parameter to the CPUMaterial structure. Read step 1) for precomputed parameters.\n * \n *      If the parameter needs clamping to avoid NaNs/singularities/numerical imprecisions, the clamping\n *      must be done in CPUMaterial::make_safe()\n * \n *      You also need to add a line in CPUMaterial::pack_to_gpu() to define how the GPUMaterial (whose data is packed).\n *      This is most likely just a .set() call like all the other parameters. 
That setter will be defined in step 3).\n *      If you have some precomputation to do (such as with the emission), it can be done in there (look at the .set_emission() call)\n * \n * 3)   MaterialPacked.h\n *\n *      Add the parameter to the MaterialPacked structure in MaterialPacked.h (only the parameters that \n *      the GPU is going to use. So, if there is any precomputation to be done (most parameters do not have precomputation), add only what \n *      holds the precomputed result that the GPU is directly going to use, not the data needed \n *      for the precomputation (that's only in CPUMaterial).\n * \n *      The parameter will need to be packed. It can be added to a member that doesn't have all its \"fields\" filled yet (look for // TODO)\n *      or a new member needs to be created for the new parameter.\n * \n *      The new parameter can also be full-range, i.e. not packed if precision is important or packing is impractical\n * \n *      After the parameter has been added to a packed member, write the getter and setter in the same class (structure)\n * \n *      The function DevicePackedEffectiveMaterial::pack() needs to be completed (follow what is done for the other parameters).\n *      This is the function that will be called when packing the GPUMaterial into the GBuffer\n *      This looks quite a bit like pack_to_gpu() from before but there is no precomputations to be done here because the precomputation\n *      has already been done before in pack_to_gpu()\n * \n *      The function DevicePackedEffectiveMaterial::unpack() needs to be completed (follow what is done for the other parameters).\n *      This is the function that will be called when unpacking the material from the G-Buffer\n * \n *      The function DevicePackedTexturedMaterial::unpack() needs to be completed (follow what is done for the other parameters).\n *      This is the function that will be called when unpacking the material from the materials buffer (when reading the 
material of the geometry a ray just hit).\n *      The unpacked textured material will then be used to read the textures of the material at the hit point and the whole\n *      will result in a DevicePackedEffectiveMaterial that will be used in the rest of the shaders (or packed into the G-Buffer)\n * \n * 4)   MaterialPackedSoA.h\n * \n *      Add a getter for the parameter to DevicePackedEffectiveMaterialSoA.\n *      This is to read the parameter from the structure of arrays (one buffer per each parameter packed) given a material index\n * \n *      Add the parameter reading in DevicePackedEffectiveMaterialSoA::read_partial_material(). This function is just a handy function\n *      to produce a DevicePackedTexturedMaterial by reading all the arrays of the structure of arrays\n * \n *      Note that memory traffic can be saved in some case. Let's you're adding a bunch of parameters for a \"super metallic\" lobe:\n *      - super metallic strength\n *      - super roughness\n *      - super anisotropy\n *      - super flakes\n *      - super fresnel F82 color\n * \n *      The 'super metallic strength' parameter controls the overall strength of the super metallic lobe. \n *      If it is 0, then the super metallic lobe is disabled from the BSDF. This is a case where it is not \n *      needed to read any of the other parameters (super roughness, super anisotropy, ...) 
because they \n *      will not be used anyways since the super metallic lobe is disabled.\n * \n *      This logic to save memory traffic has already been applied to most of the other lobes (coat, glass, ...)\n * \n * 5)   Add controls to ImGuiObjectsWindow (and the global material overrider)\n */\n\n /**\n  * Unpacked material for use in the shaders\n  */\nstruct DeviceUnpackedEffectiveMaterial\n{\n    HIPRT_HOST_DEVICE bool is_emissive() const\n    {\n        return !hippt::is_zero(emission.r)\n            || !hippt::is_zero(emission.g)\n            || !hippt::is_zero(emission.b)\n            || emissive_texture_used;\n    }\n\n    ColorRGB32F emission = ColorRGB32F{ 0.0f, 0.0f, 0.0f };\n    ColorRGB32F base_color = ColorRGB32F(1.0f);\n\n    float roughness = 0.3f;\n    float oren_nayar_sigma = 0.34906585039886591538f; // 20 degrees standard deviation in radian\n\n    // Parameters for Adobe 2023 F82-tint model\n    float metallic = 0.0f;\n    float metallic_F90_falloff_exponent = 5.0f;\n    // F0 is not here as it uses the 'base_color' of the material\n    ColorRGB32F metallic_F82 = ColorRGB32F(1.0f);\n    ColorRGB32F metallic_F90 = ColorRGB32F(1.0f);\n    float anisotropy = 0.0f;\n    float anisotropy_rotation = 0.0f;\n    float second_roughness_weight = 0.0f;\n    float second_roughness = 0.5f;\n\n    // Specular intensity\n    float specular = 1.0f;\n    // Specular tint intensity.\n    // Specular will be white if 0.0f and will be 'specular_color' if 1.0f\n    float specular_tint = 1.0f;\n    ColorRGB32F specular_color = ColorRGB32F(1.0f);\n    // Same as coat darkening but for total internal reflection inside the specular layer\n    // that sits on top of the diffuse base\n    //\n    // Disabled by default for artistic \"expectations\"\n    float specular_darkening = 0.0f;\n\n    float coat = 0.0f;\n    ColorRGB32F coat_medium_absorption = ColorRGB32F{ 1.0f, 1.0f, 1.0f };\n    // The coat thickness influences the amount of absorption (given by 
'coat_medium_absorption')\n    // that will happen inside the coat\n    float coat_medium_thickness = 5.0f;\n    float coat_roughness = 0.0f;\n    // Physical accuracy requires that a rough clearcoat also roughens what's underneath it\n    // i.e. the specular/metallic/transmission layers.\n    // \n    // The option is however given here to artistically disable\n    // that behavior by using coat roughening = 0.0f.\n    float coat_roughening = 1.0f;\n    // Because of the total internal reflection that can happen inside the coat layer (i.e.\n    // light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the\n    // clearcoat will appear with increased saturation.\n    float coat_darkening = 1.0f;\n    float coat_anisotropy = 0.0f;\n    float coat_anisotropy_rotation = 0.0f;\n    float coat_ior = 1.5f;\n\n    float sheen = 0.0f; // Sheen strength\n    float sheen_roughness = 0.5f;\n    ColorRGB32F sheen_color = ColorRGB32F(1.0f);\n\n    float ior = 1.40f;\n    float specular_transmission = 0.0f;\n    float diffuse_transmission = 0.0f;\n    // At what distance is the light absorbed to the given absorption_color\n    float absorption_at_distance = 1.0f;\n    // Color of the light absorption when traveling through the medium\n    ColorRGB32F absorption_color = ColorRGB32F(1.0f);\n    float dispersion_scale = 0.0f;\n    float dispersion_abbe_number = 20.0f;\n\n    float thin_film = 0.0f;\n    float thin_film_ior = 1.3f;\n    float thin_film_thickness = 500.0f;\n    float thin_film_kappa_3 = 0.0f;\n    float thin_film_hue_shift_degrees = 0.0f;\n    float thin_film_base_ior_override = 1.0f;\n\n    // 1.0f makes the material completely opaque\n    // 0.0f completely transparent (becomes invisible)\n    float alpha_opacity = 1.0f;\n\n    unsigned char energy_preservation_monte_carlo_samples = 12;\n    \n    /**\n     * The booleans are moved to the end of the structure to avoid too much structure packing\n     */\n\n    // Whether or not to do energy 
compensation of the metallic layer\n    // for that material\n    bool do_metallic_energy_compensation = true;\n    // Whether or not to do energy compensation of the specular/diffuse layer\n    // for that material\n    bool do_specular_energy_compensation = true;\n    // Whether or not to do energy compensation of the clearcoat layer\n    // for that material\n    bool do_coat_energy_compensation = true;\n    bool thin_walled = false;\n    // Whether or not to do energy compensation of the glass layer\n    // for that material\n    bool do_glass_energy_compensation = true;\n    bool thin_film_do_ior_override = false;\n\n    // If true, 'energy_preservation_monte_carlo_samples' will be used\n    // to compute the directional albedo of this material.\n    // This computed directional albedo is then used to ensure perfect energy conservation\n    // and preservation. \n    // \n    // This is however very expensive.\n    // This is usually only needed on clearcoated materials (but even then, the energy loss due to the absence of multiple scattering between\n    // the clearcoat layer and the BSDF below may be acceptable).\n    // \n    // Non-clearcoated materials can already ensure perfect (modulo implementation quality) energy \n    // conservation/preservation with the precomputed LUTs [Turquin, 2019]. \n    // \n    // See PrincipledBSDFDoEnergyCompensation in this codebase.\n    bool enforce_strong_energy_conservation = false;\n    // This member is only ever set to true on the GPU when we have the simplified material\n    // that doesn't have texture indices anymore. 
Then we can manually fetch the emissive texture\n    // index of the material and sample the emissive texture\n    bool emissive_texture_used = false;\n\n    HIPRT_HOST_DEVICE void set_dielectric_priority(unsigned char priority) { dielectric_priority = priority; }\n\n    HIPRT_HOST_DEVICE unsigned char get_dielectric_priority() const\n    {\n#if BSDFOverride == BSDF_LAMBERTIAN || BSDFOverride == BSDF_OREN_NAYAR\n        // These BSDFs do not support transmission so every material\n        // should have the same priority\n        return 0;\n#else\n        return dielectric_priority;\n#endif\n    }\n\nprivate:\n        // Nested dielectric parameter\n        // Private because this may be different depending on the BRDF override\n        // being used so we want to control this with getters/setters\n        unsigned char dielectric_priority = 0;\n};\n\nstruct DeviceUnpackedTexturedMaterial : public DeviceUnpackedEffectiveMaterial\n{\n    int normal_map_texture_index = 65535;\n\n    int emission_texture_index = 65535;\n    int base_color_texture_index = 65535;\n\n    // If not 65535, there is only one texture for the metallic and the roughness parameters in which\n    // case the green channel is the roughness and the blue channel is the metalness\n    int roughness_metallic_texture_index = 65535;\n    int roughness_texture_index = 65535;\n    int metallic_texture_index = 65535;\n    int anisotropic_texture_index = 65535;\n\n    int specular_texture_index = 65535;\n    int coat_texture_index = 65535;\n    int sheen_texture_index = 65535;\n    int specular_transmission_texture_index = 65535;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Material/MaterialUtils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_UTILS_H\n#define HOST_DEVICE_COMMON_MATERIAL_UTILS_H\n\n#include \"Device/includes/BSDFs/BSDFIncidentLightInfo.h\"\n\n#include \"HostDeviceCommon/Material/MaterialConstants.h\"\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n#include \"HostDeviceCommon/Material/MaterialUnpacked.h\"\n#include \"HostDeviceCommon/KernelOptions/PrincipledBSDFKernelOptions.h\"\n\nstruct MaterialUtils\n{\n    HIPRT_HOST_DEVICE static void get_oren_nayar_AB(float sigma, float& out_oren_A, float& out_oren_B)\n    {\n        float sigma2 = sigma * sigma;\n        out_oren_A = 1.0f - sigma2 / (2.0f * (sigma2 + 0.33f));\n        out_oren_B = 0.45f * sigma2 / (sigma2 + 0.09f);\n    }\n\n    HIPRT_HOST_DEVICE static void get_alphas(float roughness, float anisotropy, float& out_alpha_x, float& out_alpha_y)\n    {\n        float aspect = sqrtf(1.0f - 0.9f * anisotropy);\n        out_alpha_x = hippt::max(MaterialConstants::ROUGHNESS_CLAMP, roughness * roughness / aspect);\n        out_alpha_y = hippt::max(MaterialConstants::ROUGHNESS_CLAMP, roughness * roughness * aspect);\n    }\n\n    HIPRT_HOST_DEVICE static float get_thin_walled_roughness(bool thin_walled, float base_roughness, float relative_eta)\n    {\n        if (!thin_walled)\n            return base_roughness;\n\n        /*\n         * Roughness remapping so that a thin walled interface matches better a\n         * properly modeled double interface model. 
Said otherwise: roughness remapping\n         * so that the thin walled approximation matches the non thin walled physically correct equivalent\n         *\n         * Reference:\n         * [Revisiting Physically Based Shading at Imageworks, Christopher Kulla & Alejandro Conty, 2017]\n         *\n         * https://blog.selfshadow.com/publications/s2017-shading-course/imageworks/s2017_pbs_imageworks_slides_v2.pdf\n         */\n        float remapped = base_roughness * sqrt(3.7f * (relative_eta - 1.0f) * hippt::square(relative_eta - 0.5f) / hippt::pow_3(relative_eta));\n\n        // Remapped roughness starts going above 1.0f starting at relative eta around 1.9f\n        // and ends up at 1.39f at relative eta 3.5f\n        //\n        // Because we don't expect the user to input higher IOR values than that,\n        // we remap that remapped roughness from [0.0f, 1.39f] to [0.0f, 1.0f]\n        // and if the user inputs higher IOR values than 3.5f, we clamp to 1.0f roughness\n        // anyways\n        return hippt::clamp(0.0f, 1.0f, remapped / 1.39f);\n    }\n\n    HIPRT_HOST_DEVICE static bool is_perfectly_smooth(float roughness, float roughness_threshold = MaterialConstants::PERFECTLY_SMOOTH_ROUGHNESS_THRESHOLD)\n    {\n        return roughness <= roughness_threshold;\n    }\n\n    /**\n     * Whether or not it makes sense to even try light sampling with NEE on that material\n     *\n     * Perfectly smooth materials for example cannot do light sampling because no given light\n     * direction is going to align with the delta distribution peak of the BRDF so we can save\n     * some performance by not even attempting light sampling in the first place\n     */\n    HIPRT_HOST_DEVICE static bool can_do_light_sampling(float material_roughness, float material_metallic, float material_specular_transmission, float material_coat, float material_coat_roughness, float material_second_roughness, float material_second_roughness_weight, float roughness_threshold)\n    
{\n#if DirectLightSamplingDeltaDistributionOptimization == KERNEL_OPTION_FALSE\n        return true;\n#elif PrincipledBSDFDoMicrofacetRegularization == KERNEL_OPTION_TRUE\n        // If we have BSDF regularization, everything can do light sampling now\n        return true;\n#endif\n\n#if BSDFOverride == BSDF_LAMBERTIAN || BSDFOverride == BSDF_OREN_NAYAR\n        // We can always do light sampling on these BSDFs\n        return true;\n#endif\n\n        bool smooth_base_layer = MaterialUtils::is_perfectly_smooth(material_roughness, roughness_threshold) && (material_metallic == 1.0f || material_specular_transmission == 1.0f);\n        bool smooth_coat = material_coat == 0.0f || (material_coat > 0.0f && MaterialUtils::is_perfectly_smooth(material_coat_roughness, roughness_threshold));\n        bool second_roughness_smooth = MaterialUtils::is_perfectly_smooth(material_second_roughness, roughness_threshold) || material_second_roughness_weight == 0.0f;\n        if (smooth_base_layer && smooth_coat && second_roughness_smooth)\n\t\t\t// Everything is smooth, cannot do light sampling\n            return false;\n\n        return true;\n    }\n\n    HIPRT_HOST_DEVICE static bool can_do_light_sampling(const DeviceUnpackedEffectiveMaterial& material, float roughness_threshold = MaterialConstants::PERFECTLY_SMOOTH_ROUGHNESS_THRESHOLD)\n    {\n        return can_do_light_sampling(material.roughness, material.metallic, material.specular_transmission, material.coat, material.coat_roughness, material.second_roughness, material.second_roughness_weight, roughness_threshold);\n    }\n\n    HIPRT_HOST_DEVICE static bool can_do_light_sampling(const DevicePackedEffectiveMaterial& material, float roughness_threshold = MaterialConstants::PERFECTLY_SMOOTH_ROUGHNESS_THRESHOLD)\n    {\n        return can_do_light_sampling(material.get_roughness(), material.get_metallic(), material.get_specular_transmission(), material.get_coat(), material.get_coat_roughness(), material.get_second_roughness(), 
material.get_second_roughness_weight(), roughness_threshold);\n    }\n\n    /**\n\t * Returns the minimum roughness of the material looking at all the active lobes\n     */\n    HIPRT_HOST_DEVICE static float minimum_roughness(const DeviceUnpackedEffectiveMaterial& material)\n    {\n\t\tfloat coat_roughness = material.coat > 0.0f ? material.coat_roughness : 1.0f;\n\t\tfloat specular_roughness = material.specular > 0.0f ? material.roughness : 1.0f;\n\t\tfloat glass_roughness = material.specular_transmission > 0.0f ? material.roughness : 1.0f;\n\t\tfloat metallic_roughness = (material.metallic > 0.0f && material.second_roughness_weight < 1.0f) ? material.roughness : 1.0f;\n\t\tfloat metallic_2_roughness = (material.metallic > 0.0f && material.second_roughness_weight > 0.0f) ? material.second_roughness : 1.0f;\n     \n\t\treturn hippt::min(coat_roughness, hippt::min(specular_roughness, hippt::min(glass_roughness, hippt::min(metallic_roughness, metallic_2_roughness))));\n    }\n\n    enum SpecularDeltaReflectionSampled : int\n    {\n        NOT_SPECULAR = -1,\n        SPECULAR_PEAK_NOT_SAMPLED = 0,\n        SPECULAR_PEAK_SAMPLED = 1,\n    };\n\n    /**\n     * Determines whether a perfectly smooth lobe has any chance of evaluating to non-0.\n     * \n     * This is only relevant for perfectly smooth materials/lobe where we don't want to evaluate the specular BRDF\n     * with anything other than a direction that was sampled directly from that specular BRDF.\n     * \n     * The 'delta_distribution_roughness' and 'delta_distribution_anisotropy' parameters here describe the BRDF lobe\n     * that is being evaluated.\n     * \n     * 'incident_light_info' is some additional information about the incident light direction used for\n     * evaluating the current lobe\n     * \n     * Returns 1 only if the specular distribution is worth evaluating, 0 if there's no point because it's going to\n     * evaluate to 0 anyways\n     * \n     * Returns -1 if the distribution given 
isn't specular in the first place (delta_distribution_roughness isn't very close to 0)\n     */\n    HIPRT_HOST_DEVICE static SpecularDeltaReflectionSampled is_specular_delta_reflection_sampled(const DeviceUnpackedEffectiveMaterial& material, float delta_distribution_roughness, float delta_distribution_anisotropy, BSDFIncidentLightInfo incident_light_info)\n    {\n        if (!MaterialUtils::is_perfectly_smooth(delta_distribution_roughness))\n            return SpecularDeltaReflectionSampled::NOT_SPECULAR;\n\n        // For the glass lobe sampled direction to match, we only need it to be a reflection\n        // and we need the glass lobe to be perfectly smooth\n        bool matching_base_substrate_anisotropy = hippt::abs(delta_distribution_anisotropy - material.anisotropy) < 1.0e-3f;\n        bool sampled_from_glass = incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_GLASS_REFLECT_LOBE && MaterialUtils::is_perfectly_smooth(material.roughness) && matching_base_substrate_anisotropy;\n        if (sampled_from_glass)\n            // We can stop here\n            return SpecularDeltaReflectionSampled::SPECULAR_PEAK_SAMPLED;\n\n        // Same for the metal lobe (except that it's always a reflection, so it's easy there)\n        bool sampled_from_first_metal = incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_FIRST_METAL_LOBE && MaterialUtils::is_perfectly_smooth(material.roughness) && matching_base_substrate_anisotropy;\n        bool sampled_from_second_metal = incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SECOND_METAL_LOBE && MaterialUtils::is_perfectly_smooth(material.second_roughness) && matching_base_substrate_anisotropy;\n        if (sampled_from_first_metal || sampled_from_second_metal)\n            // We can stop here\n            return SpecularDeltaReflectionSampled::SPECULAR_PEAK_SAMPLED;\n\n        // Same for the coat\n        bool matching_coat_anisotropy = 
hippt::abs(delta_distribution_anisotropy - material.coat_anisotropy) < 1.0e-3f;\n        bool sampled_from_coat = incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_COAT_LOBE && matching_coat_anisotropy && MaterialUtils::is_perfectly_smooth(material.coat_roughness);\n        if (sampled_from_coat)\n            // We can stop here\n            return SpecularDeltaReflectionSampled::SPECULAR_PEAK_SAMPLED;\n\n        // Same for the specular layer\n        bool sampled_from_specular = incident_light_info == BSDFIncidentLightInfo::LIGHT_DIRECTION_SAMPLED_FROM_SPECULAR_LOBE && MaterialUtils::is_perfectly_smooth(material.roughness) && matching_base_substrate_anisotropy;\n        if (sampled_from_specular)\n            // We can stop here\n            return SpecularDeltaReflectionSampled::SPECULAR_PEAK_SAMPLED;\n\n        return SpecularDeltaReflectionSampled::SPECULAR_PEAK_NOT_SAMPLED;\n    }\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Math.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATH_H\n#define HOST_DEVICE_COMMON_MATH_H\n\n#if defined( __KERNELCC__ )\n#include <hiprt/hiprt_device.h>\n#else\n#include <hiprt/hiprt_vec.h>\n#endif\n\n#define int2 hiprtInt2\n#define int3 hiprtInt3\n#define int4 hiprtInt4\n#define uint2 hiprtUint2\n\n#define float2 hiprtFloat2\n#define float3 hiprtFloat3\n#define float4 hiprtFloat4\n\n#define make_int2 make_hiprtInt2\n#define make_int3 make_hiprtInt3\n#define make_int4 make_hiprtInt4\n#define make_uint2 make_hiprtUint2\n\n#define make_float2 make_hiprtFloat2\n#define make_float3 make_hiprtFloat3\n#define make_float4 make_hiprtFloat4\n\n#if !defined(__KERNELCC__) || defined(HIPRT_BITCODE_LINKING)\n#include <hiprt/impl/Math.h>\n\n // For std::atomic in hippt::\n#include <atomic>\n// For std::bit_cast in hippt::\n#include <bit>\n#endif\n\n#include \"HostDeviceCommon/AtomicType.h\"\n\nstruct float4x4\n{\n\tfloat m[4][4] = { {0.0f, 0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f, 0.0f} };\n};\n\nstruct float3x3\n{\n\tfloat m[3][3] = { {0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f}, {0.0f, 0.0f, 0.0f} };\n};\n\n// Here we're defining aliases for common functions used in shader code.\n// \n// Because the same shader code can be used both on the CPU and the GPU,\n// both code have to compile either through the classical C++ compiler or\n// through the GPU shader compiler. 
This means that we have to use functions\n// that were meant to be used on the CPU or on the GPU (depending on the case).\n// \n// For example, we're using glm as the math library on the CPU, so 'normalize'\n// will actually be aliased to glm::normalize for the CPU\n// but 'normalize' will be aliased to hiprt::normalize on the GPU because\n// glm isn't meant to be used on the GPU\nnamespace hippt\n{\n#ifdef __KERNELCC__\n#define M_PI hiprt::Pi\n#define M_TWO_PI\t6.28318530717958647693f // 2.0f * M_PI\n#define M_FOUR_PI\t12.5663706143591729539f // 4.0f * M_PI\n#define M_INV_PI\t0.31830988618379067154f // 1.0f / M_PI\n#define M_INV_2_PI\t0.15915494309189533577f // 1.0f / (2.0f * M_PI)\n#define M_TWO_PI_SQUARED\t19.73920880217871723767f\n#define NEAR_ZERO\t1.0e-10f\n\n\t/**\n\t * Returns the 'warpSize' runtime constant of the GPU\n\t */\n\t__device__ int warp_size() { return warpSize; }\n\t__device__ int thread_idx_x() { return threadIdx.x + blockIdx.x * blockDim.x; }\n\t__device__ int thread_idx_y() { return threadIdx.y + blockIdx.y * blockDim.y; }\n\t__device__ int thread_idx_global() { return hippt::thread_idx_x() + hippt::thread_idx_y() * blockDim.x * gridDim.x; }\n\t__device__ bool is_pixel_index(int x, int y) { return hippt::thread_idx_x() == x && hippt::thread_idx_y() == y; }\n\t__device__ int current_warp_lane() { return (threadIdx.x + threadIdx.y * blockDim.x) % hippt::warp_size(); }\n\n\ttemplate <typename T>\n\t__device__ T ldg_load(T* address) { return __ldg(address); }\n\n\t__device__ float3 cross(float3 u, float3 v) { return hiprt::cross(u, v); }\n\t__device__ float dot(float3 u, float3 v) { return hiprt::dot(u, v); }\n\n\t__device__ float length(float3 u) { return sqrt(hiprt::dot(u, u)); }\n\t__device__ float length2(float3 u) { return hiprt::dot(u, u); }\n\n\t__device__ float3 abs(float3 u) { return make_float3(fabsf(u.x), fabsf(u.y), fabsf(u.z)); }\n\t__device__ float abs(float a) { return fabsf(a); }\n\n\n\n\n\n\ttemplate <typename T>\n\t__device__ T 
max(T a, T b) { return a > b ? a : b; }\n\n\t/**\n\t * Component-wise max of float3 and int3\n\t */\n\ttemplate <>\n\t__device__ float3 max(float3 a, float3 b) { return make_float3(hiprt::max(a.x, b.x), hiprt::max(a.y, b.y), hiprt::max(a.z, b.z)); }\n\ttemplate <>\n\t__device__ int3 max(int3 a, int3 b) { return make_int3(hiprt::max(a.x, b.x), hiprt::max(a.y, b.y), hiprt::max(a.z, b.z)); }\n\n\n\n\n\n\ttemplate <typename T>\n\t__device__ T min(T a, T b) { return a < b ? a : b; }\n\n\t/**\n\t * Component-wise min of float3 and int3\n\t */\n\ttemplate <>\n\t__device__ float3 min(float3 a, float3 b) { return make_float3(hiprt::min(a.x, b.x), hiprt::min(a.y, b.y), hiprt::min(a.z, b.z)); }\n\ttemplate <>\n\t__device__ int3 min(int3 a, int3 b) { return make_int3(hiprt::min(a.x, b.x), hiprt::min(a.y, b.y), hiprt::min(a.z, b.z)); }\n\t/**\n\t * Minimum of each component of the float3 against x\n\t */\n\t__device__ float3 min(float3 a, float x) { return make_float3(hiprt::min(a.x, x), hiprt::min(a.y, x), hiprt::min(a.z, x)); }\n\t__device__ float3 min(float x, float3 a) { return hippt::min(a, x); }\n\n\n\n\n\n\ttemplate <typename T>\n\t__device__ T clamp(T min_val, T max_val, T val) { return hiprt::min(max_val, hiprt::max(min_val, val)); }\n\n\t__device__ float max(float a, float b) { return a > b ? a : b; }\n\t__device__ float min(float a, float b) { return a < b ? 
a : b; }\n\t__device__ float clamp(float min_val, float max_val, float val) { return hiprt::clamp(val, min_val, max_val); }\n\n\t__device__ float3 cos(float3 x) { return make_float3(cosf(x.x), cosf(x.y), cosf(x.z)); }\n\t__device__ float2 cos(float2 x) { return make_float2(cosf(x.x), cosf(x.y)); }\n\t__device__ float intrin_cosf(float x) { return cosf(x); } // Not using the intrinsic for now because of a compiler bug\n\n\t__device__ float3 sin(float3 x) { return make_float3(sinf(x.x), sinf(x.y), sinf(x.z)); }\n\t__device__ float2 sin(float2 x) { return make_float2(sinf(x.x), sinf(x.y)); }\n\t__device__ float intrin_sinf(float x) { return __sinf(x); }\n\n\t__device__ float3 atan2(float3 y, float3 x) { return make_float3(atan2f(y.x, x.x), atan2f(y.y, x.y), atan2f(y.z, x.z)); }\n\n\t__device__ float2 exp(float2 x) { return make_float2(expf(x.x), expf(x.y)); }\n\t__device__ float3 exp(float3 x) { return make_float3(expf(x.x), expf(x.y), expf(x.z)); }\n\t__device__ float3 ldexp(float3 x, int exp) { return make_float3(ldexpf(x.x, exp), ldexpf(x.y, exp), ldexpf(x.z, exp)); }\n\n\ttemplate <typename T>\n\t__device__ T square(T x) { return x * x; }\n\n\t__device__ float2 sqrt(float2 uv) { return make_float2(sqrtf(uv.x), sqrtf(uv.y)); }\n\t__device__ float3 sqrt(float3 uvw) { return make_float3(sqrtf(uvw.x), sqrtf(uvw.y), sqrtf(uvw.z)); }\n\n\t__device__ float pow_1_4(float x) { return sqrtf(sqrtf(x)); }\n\t__device__ constexpr float pow_3(float x) { return x * x * x; }\n\t__device__ constexpr float pow_4(float x) { float x2 = x * x; return x2 * x2; }\n\t__device__ constexpr float pow_5(float x) { float x2 = x * x; float x4 = x2 * x2; return x4 * x; }\n\t__device__ constexpr float pow_6(float x) { float x2 = x * x; float x4 = x2 * x2; return x4 * x2; }\n\n\t__device__ float intrin_pow(float x, float y) { return __powf(x, y); }\n\n\t__device__ float3 normalize(float3 u) { return hiprt::normalize(u); }\n\n\ttemplate <typename T>\n\t__device__ bool is_nan(const T& v) { return 
isnan(v); }\n\ttemplate <typename T>\n\t__device__ bool is_inf(const T& v) { return isinf(v); }\n\t__device__ bool is_zero(float x) { return x < NEAR_ZERO && x > -NEAR_ZERO; }\n\n\t__device__ unsigned int float_as_uint(float float_num)\n\t{\n\t\treturn __float_as_uint(float_num);\n\t}\n\n\t/**\n\t * Reads the 32-bit or 64-bit word old located at the address 'address' \n\t * in global or shared memory and stores 'value' to memory at the same address. \n\t * \n\t * These two operations are performed in one atomic transaction. \n\t * The function returns old.\n\t */\n\ttemplate <typename T>\n\t__device__ T atomic_exchange(T* address, T value) { return atomicExch(address, value); }\n\n\t/**\n\t * Reads the 32-bit or 64-bit word 'old' located at 'address' in global or shared memory,\n\t * computes the maximum of 'old' and 'value', and stores the result back to memory at the\n\t * same address.\n\t * \n\t * The function returns 'old'\n\t */\n\ttemplate <typename T>\n\t__device__ T atomic_max(T* address, T value) { return atomicMax(address, value); }\n\n\t/**\n\t * Reads the 32-bit or 64-bit word 'old' located at 'address' in global or shared memory,\n\t * computes the minimum of 'old' and 'value', and stores the result back to memory at the\n\t * same address.\n\t * \n\t * The function returns 'old'\n\t */\n\ttemplate <typename T> \n\t__device__ T atomic_min(T* address, T value) { return atomicMin(address, value); }\n\n\t/**\n\t * The function returns the value at 'address' before the increment\n\t */\n\ttemplate <typename T>\n\t__device__ T atomic_fetch_add(T* address, T increment) { return atomicAdd(address, increment); }\n\n\ttemplate <typename T>\n\t__device__ T atomic_load(T* address) { return *address; }\n\t/**\n\t * Reads the 16/32/64 bit word at the 'address' in global or shared memory, \n\t * computes(*address == expected ? new_value : *address), and stores the result\n\t * back to memory at the same address. 
\n\t * \n\t * These three operations are performed in one atomic transaction.\n\t * The function returns old (Compare And Swap).\n\t */\n\ttemplate <typename T>\n\t__device__ T atomic_compare_exchange(T* address, T expected, T new_value) { return atomicCAS(address, expected, new_value); }\n\n\ttemplate <>\n\t__device__ float atomic_compare_exchange(float* p, float cmp, float val) { return __int_as_float(atomicCAS((int*)p, __float_as_int(cmp), __float_as_int(val))); }\n\n\t/**\n\t * For t=0, returns a\n\t */\n\ttemplate <typename T>\n\t__device__ T lerp(T a, T b, float t) { return (1.0f - t) * a + t * b; }\n\n\t/**\n\t * For a 'value' between 'a' and 'b', returns 't' such that\n\t * (1.0f - t) * a + t * b = value\n\t * \n\t * For 'value' == 'a', returns 0.0f\n\t * For 'value' == 'b', returns 1.0f\n\t */\n\ttemplate <typename T>\n\t__device__ float inverse_lerp(T value, T a, T b)\n\t{\n\t\t// Clamping\n\t\tvalue = hippt::max(a, hippt::min(value, b));\n\n\t\treturn (value - a) / (b - a);\n\t}\n\n\t/**\n\t * Reference: https://registry.khronos.org/OpenGL-Refpages/gl4/html/smoothstep.xhtml\n\t *\n\t * For t == min, returns 0.0f\n\t * For t == max, returns 1.0f\n\t * Smoothstep interpolation in between\n\t */\n\ttemplate <typename T>\n\t__device__ T smoothstep(T min, T max, float x)\n\t{\n\t\tfloat t = hippt::clamp(0.0f, 1.0f, (x - min) / (max - min));\n\n\t\treturn t * t * (3.0f - 2.0f * t);\n\t}\n\n\t__device__ float fract(float a) { return a - floorf(a); }\n\t__device__ float asfloat(unsigned int x) { return __uint_as_float(x); }\n\t__device__ unsigned int asuint(float x) { return __float_as_uint(x); }\n\n\ttemplate <typename T>\n\t__device__ int popc(T bitmask) { return 0; }\n\ttemplate <>\n\t__device__ int popc(unsigned int bitmask) { return __popc(bitmask); }\n\ttemplate <>\n\t__device__ int popc(unsigned long long int bitmask) { return __popcll(bitmask); }\n\n\t/**\n\t * Finds the position of least significant bit set to 1 in a 32 bit unsigned integer.\n\t * 
Returns a value between 0 and 32 inclusive.\n\t *\n\t * Returns 0 if all bits are zero\n\t */\n\t__device__ unsigned int ffs(unsigned int bitmask) { return __ffs(bitmask); }\n\n\t// TODO these functions require __sync on modern NVIDIA GPUs. We should check that with __CUDACC__\n\t__device__ bool warp_any(unsigned int thread_mask, bool predicate) { return __any(predicate); }\n\t/**\n\t * Returns a bit mask whose bits are set to 1 for threads that evaluated the predicate to true.\n\t */\n\t__device__ unsigned long long int warp_ballot(unsigned int thread_mask, bool predicate) { return __ballot(predicate); }\n\t__device__ unsigned int warp_activemask() { return hippt::warp_ballot(0xFFFFFFFF, true); }\n\n\t/**\n\t * T can be a 32-bit integer type, 64-bit integer type or a single precision or double precision floating point type.\n\t * \n\t * The warp shuffle functions exchange values between threads within a warp.\n\t * \n\t * The optional width argument specifies subgroups, in which the warp can be \n\t * divided to share the variables. It has to be a power of two smaller than \n\t * or equal to warpSize. If it is smaller than warpSize, the warp is grouped \n\t * into separate groups, that are each indexed from 0 to width as if it was \n\t * its own entity, and only the lanes within that subgroup participate in the shuffle. 
\n\t * The lane indices in the subgroup are given by laneIdx % width.\n\t * \n\t * 'warp_shfl': The thread reads the value from the lane specified in srcLane\n\t */\n\ttemplate <typename T>\n\t__device__ T warp_shfl(T var, int src_lane, int width = warpSize) \n\t{ \n#ifdef __CUDACC__\n\t\treturn __shfl_sync(0xFFFFFFFF, var, src_lane, width); \n#else\n\t\treturn __shfl(var, src_lane, width);\n#endif\n\t}\n\n\t/**\n\t * Returns the index within its warp (not group) of the calling thread\n\t */\n\t__device__ unsigned int warp_2D_thread_index()\n\t{\n\t\t// warpSize assuming to be a power of 2 so the '&' operation\n\t\t// here is a modulo\n\t\treturn (threadIdx.x + threadIdx.y * blockDim.x) & warpSize;\n\t}\n\n#else\n#undef M_PI\n#define M_PI\t\t3.14159265358979323846f\n#define M_TWO_PI\t6.28318530717958647693f // 2.0f * M_PI\n#define M_FOUR_PI\t12.5663706143591729539f // 4.0f * M_PI\n#define M_INV_PI\t0.31830988618379067154f // 1.0f / M_PI\n#define M_INV_2_PI\t0.15915494309189533577f // 1.0f / (2.0f * M_PI)\n#define M_TWO_PI_SQUARED\t19.73920880217871723767f // 2.0f * pi^2\n#define NEAR_ZERO\t1.0e-10f\n\n\t/**\n\t * Returns the 'warpSize' runtime constant of the GPU\n\t */\n\tinline int warp_size() { return 1; }\n\tinline int thread_idx_x() { return 0; }\n\tinline int thread_idx_y() { return 0; }\n\tinline int thread_idx_global() { return 0; }\n\tinline bool is_pixel_index(int x, int y) { return false; }\n\tinline int current_warp_lane() { return 0; }\n\n\ttemplate <typename T>\n\tinline T ldg_load(T* address) { return *address; }\n\n\tinline float3 cross(float3 u, float3 v) { return hiprt::cross(u, v); }\n\tinline float dot(float3 u, float3 v) { return hiprt::dot(u, v); }\n\n\tinline float length(float3 u) { return sqrtf(dot(u, u)); }\n\tinline float length2(float3 u) { return dot(u, u); }\n\n\tinline float3 abs(float3 u) { return make_float3(std::abs(u.x), std::abs(u.y), std::abs(u.z)); }\n\tinline float abs(float a) { return std::abs(a); }\n\n\n\n\n\n\n\ttemplate 
<typename T>\n\tinline T max(T a, T b) { return a > b ? a : b; }\n\t/**\n\t * Component-wise max of float3 and int3\n\t */\n\ttemplate <>\n\tinline float3 max(float3 a, float3 b) { return make_float3(hiprt::max(a.x, b.x), hiprt::max(a.y, b.y), hiprt::max(a.z, b.z)); }\n\ttemplate <>\n\tinline int3 max(int3 a, int3 b) { return make_int3(hiprt::max(a.x, b.x), hiprt::max(a.y, b.y), hiprt::max(a.z, b.z)); }\n\n\n\n\n\ttemplate <typename T>\n\tinline T min(T a, T b) { return a < b ? a : b; }\n\n\t/**\n\t * Component-wise min of float3 and int3\n\t */\n\ttemplate <>\n\tinline float3 min(float3 a, float3 b) { return make_float3(hiprt::min(a.x, b.x), hiprt::min(a.y, b.y), hiprt::min(a.z, b.z)); }\n\ttemplate <>\n\tinline int3 min(int3 a, int3 b) { return make_int3(hiprt::min(a.x, b.x), hiprt::min(a.y, b.y), hiprt::min(a.z, b.z)); }\n\n\n\n\n\n\t/**\n\t * Minimum of each component of the float3 against x\n\t */\n\tinline float3 min(float3 a, float x) { return make_float3(hiprt::min(a.x, x), hiprt::min(a.y, x), hiprt::min(a.z, x)); }\n\tinline float3 min(float x, float3 a) { return hippt::min(a, x); }\n\n\ttemplate <typename T>\n\tinline T clamp(T min_val, T max_val, T val) { return hiprt::min(max_val, hiprt::max(min_val, val)); }\n\n\tinline float2 cos(float2 x) { return make_float2(std::cos(x.x), std::cos(x.y)); }\n\tinline float3 cos(float3 x) { return make_float3(std::cos(x.x), std::cos(x.y), std::cos(x.z)); }\n\tinline float intrin_cosf(float x) { return std::cos(x); }\n\n\tinline float2 sin(float2 x) { return make_float2(std::sin(x.x), std::sin(x.y)); }\n\tinline float3 sin(float3 x) { return make_float3(std::sin(x.x), std::sin(x.y), std::sin(x.z)); }\n\tinline float intrin_sinf(float x) { return std::sin(x); }\n\n\tinline float3 atan2(float3 y, float3 x) { return make_float3(atan2f(y.x, x.x), atan2f(y.y, x.y), atan2f(y.z, x.z)); }\n\n\tinline float2 exp(float2 x) { return make_float2(expf(x.x), expf(x.y)); }\n\tinline float3 exp(float3 x) { return 
make_float3(expf(x.x), expf(x.y), expf(x.z)); }\n\tinline float3 ldexp(float3 x, int exp) { return make_float3(std::ldexp(x.x, exp), std::ldexp(x.y, exp), std::ldexp(x.z, exp)); }\n\n\ttemplate <typename T>\n\tinline T square(T x) { return x * x; }\n\n\tinline float2 sqrt(float2 uv) { return make_float2(sqrtf(uv.x), sqrtf(uv.y)); }\n\tinline float3 sqrt(float3 uvw) { return make_float3(sqrtf(uvw.x), sqrtf(uvw.y), sqrtf(uvw.z)); }\n\tinline float pow_1_4(float x) { return sqrtf(sqrtf(x)); }\n\tinline constexpr float pow_3(float x) { return x * x * x; }\n\tinline constexpr float pow_4(float x) { float x2 = x * x; return x2 * x2; }\n\tinline constexpr float pow_5(float x) { float x2 = x * x; float x4 = x2 * x2; return x4 * x; }\n\tinline constexpr float pow_6(float x) { float x2 = x * x; float x4 = x2 * x2; return x4 * x2; }\n\n\tinline float intrin_pow(float x, float y) { return powf(x, y); }\n\n\tinline float3 normalize(float3 u) { return hiprt::normalize(u); }\n\n\ttemplate <typename T>\n\tinline bool is_nan(const T& v) { return std::isnan(v); }\n\ttemplate <typename T>\n\tinline bool is_inf(const T& v) { return std::isinf(v); }\n\tinline bool is_zero(float x) { return x < NEAR_ZERO && x > -NEAR_ZERO; }\n\n\tinline unsigned int float_as_uint(float float_num)\n\t{\n\t\treturn *reinterpret_cast<unsigned int*>(&float_num);\n\t}\n\n\t/**\n\t * Reads the 32-bit or 64-bit word old located at the address 'address'\n\t * in global or shared memory and stores 'value' to memory at the same address.\n\t *\n\t * These two operations are performed in one atomic transaction.\n\t * \n\t * The function returns old.\n\t */\n\ttemplate <typename T>\n\tT atomic_exchange(std::atomic<T>* address, T value) { return address->exchange(value); }\n\n\t/**\n\t * Reads the 32-bit or 64-bit word 'old' located at 'address' in global or shared memory, \n\t * computes the maximum of 'old' and 'value', and stores the result back to memory at the \n\t * same address. 
\n\t * \n\t * The function returns 'old'\n\t */\n\ttemplate <typename T>\n\tT atomic_max(std::atomic<T>* address, T value)\n\t{\n\t\tT prev_value = *address;\n\t\twhile (prev_value < value && !address->compare_exchange_weak(prev_value, value)) {}\n\n\t\treturn prev_value;\n\t}\n\n\t/**\n\t * Reads the 32-bit or 64-bit word 'old' located at 'address' in global or shared memory,\n\t * computes the minimum of 'old' and 'value', and stores the result back to memory at the\n\t * same address.\n\t * \n\t * The function returns 'old'\n\t */\n\ttemplate <typename T>\n\tT atomic_min(std::atomic<T>* address, T value)\n\t{\n\t\tT prev_value = *address;\n\t\twhile (prev_value > value && !address->compare_exchange_weak(prev_value, value)) {}\n\t\t\n\t\treturn prev_value;\n\t}\n\n\t/**\n\t * The function returns the value at 'address' because the increment\n\t */\n\ttemplate <typename T>\n\tT atomic_fetch_add(std::atomic<T>* atomic_address, T increment) { return atomic_address->fetch_add(increment); }\n\n\ttemplate <typename T>\n\tT atomic_load(std::atomic<T>* atomic_address) { return atomic_address->load(); }\n\n\t/**\n\t * Reads the 16/32/64 bit word at the 'address' in global or shared memory,\n\t * computes(*address == expected ? new_value : *address), and stores the result\n\t * back to memory at the same address.\n\t *\n\t * These three operations are performed in one atomic transaction.\n\t * \n\t * The function returns old (Compare And Swap).\n\t */\n\ttemplate <typename T>\n\tT atomic_compare_exchange(std::atomic<T>* atomic_address, T expected, T new_value)\n\t{\n\t\t// Overriding the semantic here so that it behaves the same as in CUDA i.e. 
returns the old value\n\t\t// instead of returning true or false (stdlib default behavior)\n\n\t\tT old = *atomic_address;\n\t\t\n\t\tatomic_address->compare_exchange_strong(expected, new_value);\n\t\t\t\n\t\treturn old;\n\t}\n\n\t/**\n\t * For t=0, returns a\n\t */\n\ttemplate <typename T>\n\tinline T lerp(T a, T b, float t) { return (1.0f - t) * a + t * b; }\n\n\t/**\n\t * For a 'value' between 'a' and 'b', returns 't' such that\n\t * (1.0f - t) * a + t * b = value\n\t *\n\t * For 'value' == 'a', returns 0.0f\n\t * For 'value' == 'b', returns 1.0f\n\t */\n\ttemplate <typename T>\n\tinline float inverse_lerp(T value, T a, T b) \n\t{ \n\t\t// Clamping\n\t\tvalue = hippt::max(a, hippt::min(value, b)); \n\t\t\n\t\treturn (value - a) / (b - a); \n\t}\n\t\n\t/**\n\t * Reference: https://registry.khronos.org/OpenGL-Refpages/gl4/html/smoothstep.xhtml\n\t * \n\t * For t == min, returns 0.0f\n\t * For t == max, returns 1.0f\n\t * Smoothstep interpolation in between\n\t */\n\ttemplate <typename T>\n\tinline T smoothstep(T min, T max, float x) \n\t{ \n\t\tfloat t = hippt::clamp(0.0f, 1.0f, (x - min) / (max - min));\n\n\t\treturn t * t * (3.0f - 2.0f * t);\n\t}\n\n\tinline float fract(float a) { return a - floorf(a); }\n\tinline float asfloat(unsigned int x) { return std::bit_cast<float, unsigned int>(x); }\n\tinline unsigned int asuint(float x) { return std::bit_cast<unsigned int, float>(x); }\n\ttemplate <typename T>\n\tinline int popc(T bitmask) { return std::popcount(bitmask); }\n\n\t/**\n\t * Finds the position of least signigicant bit set to 1 in a 32 bit unsigned integer.\n\t * Returs a value between 0 and 32 inclusive.\n\t *\n\t * Returns 0 if all bits are zero\n\t */\n\tinline unsigned int ffs(unsigned int bitmask)\n\t{\n\t\tfor (int i = 0; i < sizeof(unsigned int) * 8; i++)\n\t\t\tif (bitmask & (1 << i))\n\t\t\t\treturn i;\n\n\t\treturn 0;\n\t}\n\n\tinline bool warp_any(unsigned int thread_mask, bool predicate) { return predicate; }\n\t/**\n\t * Returns a bit mask 
whose bits are set to 1 for threads that evaluated the predicate to true.\n\t */\n\tinline unsigned long long int warp_ballot(unsigned int thread_mask, bool predicate) { return predicate ? 1 : 0; }\n\tinline unsigned int warp_activemask() { return 1; }\n\n\t/**\n\t * T can be a 32-bit integer type, 64-bit integer type or a single precision or double precision floating point type.\n\t *\n\t * The warp shuffle functions exchange values between threads within a warp.\n\t *\n\t * The optional width argument specifies subgroups, in which the warp can be\n\t * divided to share the variables. It has to be a power of two smaller than\n\t * or equal to warpSize. If it is smaller than warpSize, the warp is grouped\n\t * into separate groups, that are each indexed from 0 to width as if it was\n\t * its own entity, and only the lanes within that subgroup participate in the shuffle.\n\t * The lane indices in the subgroup are given by laneIdx % width.\n\t *\n\t * 'warp_shfl': The thread reads the value from the lane specified in srcLane\n\t */\n\ttemplate <typename T>\n\tinline T warp_shfl(T var, int srcLane, int width = 1) { return var; }\n\n\t/**\n\t * Returns the index within its warp (not group) of the calling thread\n\t * \n\t * Warp sizes of 1 on the CPU\n\t */\n\tHIPRT_HOST_DEVICE HIPRT_INLINE unsigned int warp_2D_thread_index()\n\t{\n\t\treturn 1;\n\t}\n#endif\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 matrix_X_point(const float4x4& m, const float3& p)\n{\n\tfloat x = p.x;\n\tfloat y = p.y;\n\tfloat z = p.z;\n\n\t// Assuming w = 1.0f for the point p\n\tfloat xt = m.m[0][0] * x + m.m[0][1] * y + m.m[0][2] * z + m.m[0][3];\n\tfloat yt = m.m[1][0] * x + m.m[1][1] * y + m.m[1][2] * z + m.m[1][3];\n\tfloat zt = m.m[2][0] * x + m.m[2][1] * y + m.m[2][2] * z + m.m[2][3];\n\tfloat wt = m.m[3][0] * x + m.m[3][1] * y + m.m[3][2] * z + m.m[3][3];\n\n\tfloat inv_w = 1.0f;\n\tif (!hippt::is_zero(wt))\n\t\tinv_w = 1.0f / wt;\n\n\treturn make_float3(xt * inv_w, yt * inv_w, zt * 
inv_w);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 matrix_X_vec(const float3x3& m, const float3& u)\n{\n\tfloat x = u.x;\n\tfloat y = u.y;\n\tfloat z = u.z;\n\n\t// Assuming w = 0.0f for the vector u\n\tfloat xt = m.m[0][0] * x + m.m[1][0] * y + m.m[2][0] * z;\n\tfloat yt = m.m[0][1] * x + m.m[1][1] * y + m.m[2][1] * z;\n\tfloat zt = m.m[0][2] * x + m.m[1][2] * y + m.m[2][2] * z;\n\n\treturn make_float3(xt, yt, zt);\n}\n\nHIPRT_HOST_DEVICE HIPRT_INLINE float3 matrix_X_vec(const float4x4& m, const float3& u)\n{\n\tfloat x = u.x;\n\tfloat y = u.y;\n\tfloat z = u.z;\n\n\t// Assuming w = 0.0f for the vector u\n\tfloat xt = m.m[0][0] * x + m.m[1][0] * y + m.m[2][0] * z;\n\tfloat yt = m.m[0][1] * x + m.m[1][1] * y + m.m[2][1] * z;\n\tfloat zt = m.m[0][2] * x + m.m[1][2] * y + m.m[2][2] * z;\n\tfloat wt = m.m[0][3] * x + m.m[1][3] * y + m.m[2][3] * z;\n\n\tfloat inv_w = 1.0f;\n\tif (!hippt::is_zero(wt))\n\t\tinv_w = 1.0f / wt;\n\n\treturn make_float3(xt * inv_w, yt * inv_w, zt * inv_w);\n}\n\n#ifndef __KERNELCC__\n\n#include <iostream>\nstatic std::ostream& operator<<(std::ostream& os, float3 uvw)\n{\n\tos << uvw.x << \", \" << uvw.y << \", \" << uvw.z;\n\treturn os;\n}\n\n#endif\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/MicrofacetRegularizationSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MICROFACET_REGULARIZATION_SETTINGS_H\n#define HOST_DEVICE_COMMON_MICROFACET_REGULARIZATION_SETTINGS_H\n\nstruct MicrofacetRegularizationSettings\n{\n\t// Maximum value that the microfacet distribution is allowed to take\n\t// The regularized roughness will be derived from this value\n\tfloat tau_0 = 30.0f;\n\n\t// Minimum roughness. Useful when lights are so small that even camera ray jittering\n\t// causes variance\n\tfloat min_roughness = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Packing.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_PACKING_H\n#define HOST_DEVICE_COMMON_PACKING_H\n\n#include \"Device/includes/FixIntellisense.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n\n/**\n * Packs 88 bools into a uchar\n */\nstruct UChar8BoolsPacked\n{\n\t/**\n\t * Returns the bool packed at bit 'index'. 0 is LSB.\n\t * \n\t * 'index' is in [0, 7]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE bool get_bool() const\n\t{\n\t\treturn m_packed & (1 << index);\n\t}\n\n\t/**\n\t * Sets the bool at bit 'index' in the packed data. 0 is LSB.\n\t * \n\t * 'index' is in [0, 7]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE void set_bool(bool value)\n\t{\n\t\t// Clear the bit\n\t\tm_packed &= ~(1 << index);\n\n\t\t// Sets\n\t\tm_packed |= (value ? 1 : 0) << index;\n\t}\n\nprivate:\n\tunsigned char m_packed = 0;\n};\n\n/**\n * Packs a ColorRGB32F into 3x8 bit = 24 bits (this isn't a loss of precision \n * for colors that already were in SDR [0, 255]).\n * \n * A float in range [0, 1] can be packed in the remaining 8 bits. \n * This leaves us with a precision of 0.004 between values in [0, 1]. Which is probably \n * more than enough. 
Who picks up the difference between a roughness of 0.5 and 0.504 anyways?\n */\nstruct ColorRGB24bFloat0_1Packed\n{\n\tstatic constexpr float inv_255\t\t = (1.0f / (255 << 0));\n\tstatic constexpr float inv_255_shl_8 = (1.0f / (255 << 8));\n\n\tHIPRT_DEVICE ColorRGB32F get_color() const\n\t{\n\t\tfloat r = static_cast<float>(m_packed & 0x000000FF) * inv_255;\n\t\tfloat g = static_cast<float>(m_packed & 0x0000FF00) * inv_255_shl_8;\n\t\tfloat b = static_cast<float>((m_packed & 0x00FF0000) >> 16) * inv_255;\n\n\t\treturn ColorRGB32F(r, g, b);\n\t}\n\n\tHIPRT_DEVICE float get_float() const\n\t{\n\t\treturn static_cast<float>((m_packed & 0xFF000000) >> 24) * inv_255;\n\t}\n\n\tHIPRT_DEVICE void set_color(const ColorRGB32F& color)\n\t{\n\t\t// Clear 24 lower bits\n\t\tm_packed &= 0xFF000000;\n\n\t\t// Set\n\t\tm_packed |= static_cast<unsigned char>(color.r * 255.0f);\n\t\tm_packed |= static_cast<unsigned char>(color.g * 255.0f) << 8;\n\t\tm_packed |= static_cast<unsigned char>(color.b * 255.0f) << 16;\n\t}\n\n\tHIPRT_DEVICE void set_float(float float_in_0_1)\n\t{\n\t\t// Clear\n\t\tm_packed &= 0x00FFFFFF;\n\n\t\t// Set\n\t\tm_packed |= static_cast<unsigned char>(float_in_0_1 * 255.0f) << 24;\n\t}\n\nprivate:\n\tunsigned int m_packed = 0;\n};\n\n/**\n * 4 floats in [0, 1] all packed into a 32 bit unsigned int.\n * \n * This gives 8 bits for each float in [0, 1] --> precision of 0.004\n */\nstruct Float4xPacked\n{\n\tstatic constexpr float inv_255 = 0.00392156862745098039f;\n\n\t/**\n\t * Returns the float at index 'index'\n\t * \n\t * 'index' must be in [0, 3]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE float get_float() const\n\t{ \n\t\treturn static_cast<float>((m_packed & (0xFFu << (index * 8))) >> (index * 8)) * inv_255;\n\t}\n\n\t/**\n\t * Sets the float number 'index' of this 4x packed float\n\t * \n\t * 'index' must be in [0, 3]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE void set_float(float value)\n\t{\n\t\t// Clear\n\t\tm_packed &= 
~(0xFFu << (index * 8));\n\n\t\t// Set\n\t\tm_packed |= static_cast<unsigned char>(value * 255.0f) << (index * 8);\n\t}\n\nprivate:\n\tunsigned int m_packed = 0;\n};\n\n/**\n * 2 floats in [0, 1] and 2 unsigned chars all packed into a 32 bit unsigned int.\n *\n * This gives 8 bits for each float in [0, 1] --> precision of 0.004\n */\nstruct Float2xUChar2xPacked\n{\n\tstatic constexpr float inv_255 = 0.00392156862745098039f;\n\n\t/**\n\t * Returns one of the float packed in this structure\n\t * \n\t * 'index' must be in [0, 1]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE float get_float() const\n\t{\n\t\treturn ((m_packed & (0xFF << (index * 8))) >> (index * 8)) * inv_255;\n\t}\n\n\t/**\n\t * Returns one of the unsigned char packed in this structure\n\t *\n\t * 'index' must be in [0, 1]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE unsigned char get_uchar() const\n\t{\n\t\treturn (m_packed & (0x00FF0000u << (index * 8))) >> (index * 8 + 16);\n\t}\n\t\n\t/**\n\t * 'index' must be in [0, 1]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE void set_float(float value)\n\t{\n\t\t// Clear\n\t\tm_packed &= ~(0xFFu << (index * 8));\n\n\t\t// Set\n\t\tm_packed |= static_cast<unsigned char>(value * 255.0f) << (index * 8);\n\t}\n\n\t/**\n\t * 'index' must be in [0, 1]\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE void set_uchar(unsigned char value)\n\t{\n\t\t// Clear\n\t\tm_packed &= ~(0x00FF0000u << (index * 8));\n\n\t\t// Set\n\t\tm_packed |= value << (index * 8 + 16);\n\t}\n\nprivate:\n\t// Floats are in the 16 LSB\n\t// Uchars are in the 16 MSB\n\tunsigned int m_packed = 0;\n};\n\n/**\n * Packs two uints 16bits into one 32bit\n */\nstruct Uint2xPacked\n{\n\t/**\n\t * Index must be 0 or 1\n\t */\n\ttemplate <unsigned char index>\n\tHIPRT_DEVICE unsigned short get_value() const\n\t{\n\t\treturn (m_packed & (0xFFFFu << (index * 16))) >> (index * 16);\n\t}\n\n\t/**\n\t * Index must be 0 or 1\n\t */\n\ttemplate <unsigned char 
index>\n\tHIPRT_DEVICE void set_value(unsigned short value)\n\t{\n\t\t// Clear\n\t\tm_packed &= ~(0xFFFFu << (index * 16));\n\n\t\t// Set\n\t\tm_packed |= value << (index * 16);\n\t}\n\nprivate:\n\tunsigned int m_packed = 0;\n};\n\n/**\n * Reference:\n * \n * [1] [Survey of Efficient Representations for Independent Unit Vectors, Cigolle et al., 2014]\n */\nstruct GPU_CPU_ALIGN(4) Octahedral24BitNormalPadded32b\n{\npublic:\n\tHIPRT_DEVICE Octahedral24BitNormalPadded32b() {}\n\tHIPRT_DEVICE Octahedral24BitNormalPadded32b(float3 normal)\n\t{\n\t\tpack(normal);\n\t}\n\n\tHIPRT_DEVICE static Octahedral24BitNormalPadded32b pack_static(float3 normal)\n\t{\n\t\tOctahedral24BitNormalPadded32b packed;\n\t\tpacked.pack(normal);\n\n\t\treturn packed;\n\t}\n\n\tHIPRT_DEVICE void pack(float3 normal)\n\t{\n\t\tfloat2_to_Snorm12_2x_as_3UChar(octahedral_encode(normal), m_packed_x, m_packed_y, m_packed_z);\n\t}\n\n\t/**\n\t * Returns the normal that was packed in there\n\t * \n\t * The returned normal is normalized\n\t */\n\tHIPRT_DEVICE float3 unpack() const\n\t{\n\t\tfloat2 v = Snorm12_2x_as_UChar_to_float2(m_packed_x, m_packed_y, m_packed_z);\n\t\treturn final_decode(v.x, v.y);\n\t}\n\nprivate:\n\tHIPRT_DEVICE float pack_Snorm12_float(float f)\n\t{\n\t\treturn roundf(hippt::clamp(0.0f, 2.0f, f + 1.0f) * 2047.0f);\n\t}\n\n\tHIPRT_DEVICE void Snorm12_2x_as_3Uchar(float2 s, unsigned char& out_x, unsigned char& out_y, unsigned char& out_z)\n\t{\n\t\tfloat3 u;\n\t\tu.x = s.x / 16.0f;\n\t\tfloat t = floorf(s.y / 256.0f);\n\t\tu.y = ((u.x - floorf(u.x)) * 256.0f) + t;\n\t\tu.z = s.y - (t * 256.0f);\n\n\t\tout_x = u.x;\n\t\tout_y = u.y;\n\t\tout_z = u.z;\n\t}\n\n\tHIPRT_DEVICE void float2_to_Snorm12_2x_as_3UChar(float2 v, unsigned char& out_x, unsigned char& out_y, unsigned char& out_z)\n\t{\n\t\tfloat2 s = make_float2(pack_Snorm12_float(v.x), pack_Snorm12_float(v.y));\n\n\t\tSnorm12_2x_as_3Uchar(s, out_x, out_y, out_z);\n\t}\n\n\tHIPRT_DEVICE float2 octahedral_encode(float3 
v)\n\t{\n\t\tfloat l1norm_inv = 1.0f / (abs(v.x) + abs(v.y) + abs(v.z));\n\t\tfloat2 result = make_float2(v.x * l1norm_inv, v.y * l1norm_inv);\n\t\tif (v.z < 0.0f)\n\t\t\tresult = (make_float2(1.0f) - make_float2(hippt::abs(result.y), hippt::abs(result.x))) * sign_not_zero(make_float2(result.x, result.y));\n\n\t\treturn result;\n\t}\n\n\tHIPRT_DEVICE float sign_not_zero(float k) const\n\t{\n\t\treturn k >= 0.0f ? 1.0f : -1.0f;\n\t}\n\n\tHIPRT_DEVICE float2 sign_not_zero(float2 v) const\n\t{\n\t\treturn make_float2(sign_not_zero(v.x), sign_not_zero(v.y));\n\t}\n\n\tHIPRT_DEVICE float3 final_decode(float x, float y) const\n\t{\n\t\tfloat3 v = make_float3(x, y, 1.0f - abs(x) - abs(y));\n\t\tif (v.z < 0.0f) \n\t\t{\n\t\t\tfloat2 temp = make_float2(v.x, v.y);\n\t\t\tv.x = (1.0f - hippt::abs(temp.y)) * sign_not_zero(temp.x);\n\t\t\tv.y = (1.0f - hippt::abs(temp.x)) * sign_not_zero(temp.y);\n\t\t}\n\t\treturn hippt::normalize(v);\n\t}\n\n\tHIPRT_DEVICE float2 Snorm12_2x_as_Uchar_to_packed_float2(unsigned char x, unsigned char y, unsigned char z) const\n\t{\n\t\tfloat2 s;\n\n\t\tfloat temp = y / 16.0f;\n\t\ts.x = x * 16.0f + floorf(temp);\n\t\ts.y = (temp - floorf(temp)) * 256.0f * 16.0f + z;\n\n\t\treturn s;\n\t}\n\n\tHIPRT_DEVICE float unpack_Snorm12(float f) const\n\t{\n\t\treturn hippt::clamp(-1.0f, 1.0f, (f / 2047.0f) - 1.0f);\n\t}\n\n\tHIPRT_DEVICE float2 Snorm12_2x_as_UChar_to_float2(unsigned char x, unsigned char y, unsigned char z) const\n\t{\n\t\tfloat2 s = Snorm12_2x_as_Uchar_to_packed_float2(x, y, z);\n\t\treturn make_float2(unpack_Snorm12(s.x), unpack_Snorm12(s.y));\n\t}\n\n\tunsigned char m_packed_x = 0;\n\tunsigned char m_packed_y = 0;\n\tunsigned char m_packed_z = 0;\n\t// This padding here improves performance significantly on my machine for the megakernel.\n\t// Order of 60% faster, mainly due to a massive reduction in register pressure and we got 2 more wavefronts running\n\t// out of that. 
Tested with a lambertian BRDF\n\t//\n\t// Doesn't really make sense that we would get any register out of that but :shrug:.\n\t// This padding here is theoretically better anyways thanks to the 4 bytes alignment that it\n\t// provides instead of the 3-bytes alignement of the default packed struct (which is poor access pattern on the GPU)\n\tunsigned char padding = 0;\n};\n\n/**\n * Packs a float3 into 8 bytes (saves 4 bytes) with very good precision\n * \n * This stores the length of the float3 and then normalizes it and then stores\n * a 10 bit quantized version of each normalized component of the float3\n */\nstruct Float3xLengthUint10bPacked\n{\n\tHIPRT_DEVICE void pack(float3 data)\n\t{\n\t\tlength = hippt::length(data);\n\n\t\tfloat3 normalized = data / length;\n\n\t\t// Bringing in [0, 1] from [-1, 1]\n\t\tnormalized += make_float3(1.0f, 1.0f, 1.0f);\n\t\tnormalized *= 0.5f;\n\n\t\tunsigned int quantized_x = roundf(normalized.x * 1023);\n\t\tunsigned int quantized_y = roundf(normalized.y * 1023);\n\t\tunsigned int quantized_z = roundf(normalized.z * 1023);\n\n\t\tquantized = 0;\n\t\tquantized |= quantized_x;\n\t\tquantized |= quantized_y << 10;\n\t\tquantized |= quantized_z << 20;\n\t}\n\n\tHIPRT_DEVICE void pack(ColorRGB32F data)\n\t{\n\t\tpack(make_float3(data.r, data.g, data.b));\n\t}\n\n\tHIPRT_DEVICE static Float3xLengthUint10bPacked pack_static(float3 data)\n\t{\n\t\tFloat3xLengthUint10bPacked packed;\n\t\tpacked.pack(data);\n\n\t\treturn packed;\n\t}\n\n\tHIPRT_DEVICE static Float3xLengthUint10bPacked pack_static(ColorRGB32F data)\n\t{\n\t\tFloat3xLengthUint10bPacked packed;\n\t\tpacked.pack(data);\n\n\t\treturn packed;\n\t}\n\n\tHIPRT_DEVICE ColorRGB32F unpack_color3x32f() const\n\t{\n\t\tfloat3 unpacked = unpack_float3();\n\n\t\treturn ColorRGB32F(unpacked.x, unpacked.y, unpacked.z);\n\t}\n\n\tHIPRT_DEVICE float3 unpack_float3() const\n\t{\n\t\tunsigned int quantized_x = (quantized >> 00) & 0b1111111111;\n\t\tunsigned int quantized_y = (quantized >> 
10) & 0b1111111111;\n\t\tunsigned int quantized_z = (quantized >> 20) & 0b1111111111;\n\n\t\tfloat3 normalized = make_float3(quantized_x / 1023.0f, quantized_y / 1023.0f, quantized_z / 1023.0f);\n\t\t// Back in [-1, 1] from [0, 1]\n\t\tfloat3 rescaled = normalized * 2.0f - 1.0f;\n\t\tfloat3 with_length = rescaled * length;\n\n\t\treturn with_length;\n\t}\n\nprivate:\n\tfloat length = 0.0f;\n\tunsigned quantized = 0;\n};\n\n/**\n * Reference: https://github.com/microsoft/DirectX-Graphics-Samples/blob/master/MiniEngine/Core/Shaders/PixelPacking_RGBE.hlsli\n */\nstruct RGBE9995Packed\n{\n\t// RGBE, aka R9G9B9E5_SHAREDEXP, is an unsigned float HDR pixel format where red, green,\n\t// and blue all share the same exponent.  The color channels store a 9-bit value ranging\n\t// from [0/512, 511/512] which multiplies by 2^Exp and Exp ranges from [-15, 16].\n\t// Floating point specials are not encoded.\n\tHIPRT_DEVICE void pack(ColorRGB32F rgb)\n\t{\n\t\t// To determine the shared exponent, we must clamp the channels to an expressible range\n\t\tconst float kMaxVal = hippt::asfloat(0x477F8000); // 1.FF x 2^+15\n\t\tconst float kMinVal = hippt::asfloat(0x37800000); // 1.00 x 2^-16\n\n\t\t// Non-negative and <= kMaxVal\n\t\trgb.clamp(0.0f, kMaxVal);\n\n\t\t// From the maximum channel we will determine the exponent.  We clamp to a min value\n\t\t// so that the exponent is within the valid 5-bit range.\n\t\tfloat MaxChannel = hippt::max(hippt::max(kMinVal, rgb.r), hippt::max(rgb.g, rgb.b));\n\n\t\t// 'Bias' has to have the biggest exponent plus 15 (and nothing in the mantissa).  When\n\t\t// added to the three channels, it shifts the explicit '1' and the 8 most significant\n\t\t// mantissa bits into the low 9 bits.  IEEE rules of float addition will round rather\n\t\t// than truncate the discarded bits.  
Channels with smaller natural exponents will be\n\t\t// shifted further to the right (discarding more bits).\n\t\tfloat Bias = hippt::asfloat((hippt::asuint(MaxChannel) + 0x07804000) & 0x7F800000);\n\n\t\t// Shift bits into the right places\n\t\tunsigned int R, G, B;\n\t\tR = hippt::asuint(rgb.r + Bias);\n\t\tG = hippt::asuint(rgb.g + Bias);\n\t\tB = hippt::asuint(rgb.b + Bias);\n\n\t\tunsigned int E = (hippt::asuint(Bias) << 4) + 0x10000000;\n\t\tm_packed = E | B << 18 | G << 9 | (R & 0x1FF);\n\t}\n\n\tHIPRT_DEVICE ColorRGB32F unpack() const\n\t{\n\t\tfloat3 rgb = make_float3(m_packed & 0x1FF, (m_packed >> 9) & 0x1FF, (m_packed >> 18) & 0x1FF);\n\t\treturn ColorRGB32F(hippt::ldexp(rgb, static_cast<int>(m_packed >> 27) - 24));\n\t}\n\nprivate:\n\tunsigned int m_packed;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/PathRussianRoulette.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_PATH_RUSSIAN_ROULETTE_H\n#define HOST_DEVICE_COMMON_PATH_RUSSIAN_ROULETTE_H\n\nenum PathRussianRoulette\n{\n    MAX_THROUGHPUT,\n    ARNOLD_2014\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/PrecomputedEmissiveTrianglesDataSoADevice.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_PRECOMPUTED_EMISSIVE_TRIANGLE_DATA_SOA_DEVICE_H\n#define HOST_DEVICE_COMMON_PRECOMPUTED_EMISSIVE_TRIANGLE_DATA_SOA_DEVICE_H\n\nstruct PrecomputedEmissiveTrianglesDataSoADevice\n{\n\tfloat3* triangles_A = nullptr;\n\tfloat3* triangles_AB = nullptr;\n\tfloat3* triangles_AC = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/RIS/RISSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RIS_SETTINGS_H\n#define HOST_DEVICE_COMMON_RIS_SETTINGS_H\n\nstruct RISSettings\n{\n\t// How many candidate lights to sample for RIS (Resampled Importance Sampling)\n\tint number_of_light_candidates = 4;\n\t// How many candidate samples from the BSDF to use in combination\n\t// with the light candidates for RIS\n\tint number_of_bsdf_candidates = 1;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIR/ReSTIRCommonSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RESTIR_COMMON_SETTINGS_H\n#define HOST_DEVICE_COMMON_RESTIR_COMMON_SETTINGS_H\n\n/**\n * Note that no default values are set here because they are all set in\n * the ReSTIR_XXX_DefaultSettings structure/header files\n */\n\nstruct ReSTIRCommonTemporalPassSettings\n{\n\tbool do_temporal_reuse_pass;\n\n\t// If true, the position of the canonical temporal neighbor will be shuffled to increase\n\t// variation between frames and make the render more denoiser friendly\n\tbool use_permutation_sampling;\n\t// Random bits used for all the pixels in the image for the permutation sampling\n\tint permutation_sampling_random_bits;\n\n\t// How many neighbors at most to check around the temporal back-projected pixel location \n\t// to find a valid neighbor\n\tint max_neighbor_search_count;\n\t// Radius around the temporal reprojected location of a pixel in which to look for an\n\t// acceptable temporal neighbor\n\tint neighbor_search_radius;\n\n\t// If set to true, the temporal buffers will be cleared by the camera\n\t// rays kernel\n\tbool temporal_buffer_clear_requested;\n};\n\nstruct ReSTIRCommonSpatialPassSettings\n{\n\tbool do_spatial_reuse_pass;\n\n\t// What spatial pass are we currently performing?\n\t// Takes values in [0, number_of_passes - 1]\n\tint spatial_pass_index;\n\t// How many spatial reuse pass to perform\n\tint number_of_passes;\n\t// The radius within which neighbor are going to be reused spatially\n\tint reuse_radius;\n\t// if true, the reuse radius will automatically be adjusted based on the render resolution\n\tbool auto_reuse_radius = true;\n\t// How many neighbors to reuse during the spatial pass\n\tint reuse_neighbor_count;\n\n\t// Whether or not to increase the number of spatially resampled neighbor\n\t// for disoccluded pixels (that have no temporal history)\n\tbool 
do_disocclusion_reuse_boost;\n\t// How many neighbors to spatially reuse when a disocclusion is detected.\n\t// This reduces the increased variance of disoccluded regions\n\tint disocclusion_reuse_count;\n\n\t// If true, reused neighbors will be hardcoded to always be 'reuse_radius' pixels to the right,\n\t// not in a circle around the center pixel.\n\tbool debug_neighbor_location;\n\t// If this is 0, the debug location will be horizontal\n\t// If this is 1, the debug location will be vertical\n\t// If this is 2, the debug location will be in diagonal\n\tint debug_neighbor_location_direction;\n\n\t// Whether or not to rotate the spatial neighbor locations generated.\n\t// Pretty much mandatory when using Hammersley points otherwise the neighbors\n\t// will always be the exact same\n\tbool do_neighbor_rotation;\n\t// This seed is used to generate the spatial neighbors positions if not using Hammersley\n\tunsigned int spatial_neighbors_rng_seed;\n\t// Reuses the same random numbers for all the pixels in the image for picking the spatial neighbors\n\t// such that memory accesses to surface data / reservoirs are coalesced\n\tbool coalesced_spatial_reuse;\n\n\t// If true, the best per-pixel spatial reuse radius to use as\n\t// well as the sectors in the spatial reuse disk (split in 32 sectors) that should be used for reuse\n\t// will be precomputed in a prepass\n\t//\n\t// This increases the spatial reuse \"hit rate\" (i.e. 
the number of neighbors that are not rejected by G-Buffer heuristics)\n\t// and thus increases convergence speed.\n\tbool use_adaptive_directional_spatial_reuse;\n\n\t/**\n\t * If you want to check whether you should use the features of the adaptive directional spatial\n\t * reuse, prefer using this function rather than directly checking the 'use_adaptive_directional_spatial_reuse'\n\t * member\n\t * \n\t * This is because the directional spatial reuse feature cannot be used in realtime mode so if you use the\n\t * 'use_adaptive_directional_spatial_reuse' member directly, you would also have to check for 'render_data.render_settings.accumulate'\n\t * everytime.\n\t * \n\t * This function does it all\n\t */\n\tHIPRT_HOST_DEVICE bool do_adaptive_directional_spatial_reuse(bool render_data_render_settings_accumulate) const\n\t{\n\t\treturn use_adaptive_directional_spatial_reuse && render_data_render_settings_accumulate;\n\t}\n\n\t// If true, neighboring pixels that have converged (if adaptive sampling is enabled)\n\t// won't be reused to reduce bias.\n\t// If false, even neighboring pixels that have converged can be reused by the spatial pass\n\tbool allow_converged_neighbors_reuse;\n\t// If we're allowing the spatial reuse of converged neighbors, we're doing so we're a given\n\t// probability instead of always/never. 
This helps trade performance for bias.\n\tfloat converged_neighbor_reuse_probability;\n\n\t// If true, the visibility in the target function will only be used on the last spatial reuse\n\t// pass (and also if visibility is wanted)\n\tbool do_visibility_only_last_pass;\n\t// Visibility term in the target function will only be used for the first\n\t// 'neighbor_visibility_count' neighbors, not all.\n\tint neighbor_visibility_count;\n\n\tunsigned int* per_pixel_spatial_reuse_directions_mask_u = nullptr;\n\tunsigned long long int* per_pixel_spatial_reuse_directions_mask_ull = nullptr;\n\t// Framebuffer that contains per-pixel spatial radius for use in the spatial reuse passes of ReSTIR.\n\t// This framebuffer is filled by the \n\tunsigned char* per_pixel_spatial_reuse_radius = nullptr;\n\t// The minimum radius that will be used per pixel when the optimal per - pixel spatial reuse\n\t// radius is computed by adaptive-directional spatial reuse\n\tint minimum_per_pixel_reuse_radius = 3;\n\n\t// This variable here is spatial because it is written to at the beginning of the spatial reuse pass.\n\t// The only goal of this variable is to be able to carry around the function the direction reuse mask\n\t// (i.e. which directions are allowed for reuse)of the pixel.\n\t//\n\t// This is purely to avoid passing yet another arguments to every function in the code...\n\tunsigned long long int current_pixel_directions_reuse_mask = 0;\n\n\t// Whether or not to gather statistics on the hit rate of the spatial reuse pass (i.e. how many\n\t// neighbors are rejected because of the G-Buffer heuristics vs. 
the maximum number of neighbors that can be reused)\n\tbool compute_spatial_reuse_hit_rate;\n\t// Counters for gathering the statistics on the spatial reuse hit rate\n\tAtomicType<unsigned long long int>* spatial_reuse_hit_rate_hits = nullptr;\n\tAtomicType<unsigned long long int>* spatial_reuse_hit_rate_total = nullptr;\n};\n\nstruct ReSTIRCommonNeighborSimiliaritySettings\n{\n\tbool use_normal_similarity_heuristic;\n\t// User-friendly (for ImGui) normal angle. When resampling a neighbor (temporal or spatial),\n\t// the normal of the neighbor being re-sampled must be similar to our normal. This angle gives the\n\t// \"similarity threshold\". Normals must be within 25 degrees of each other by default\n\tfloat normal_similarity_angle_degrees;\n\t// Precomputed cosine of the angle for use in the shader\n\tfloat normal_similarity_angle_precomp; // Normals must be within 25 degrees by default\n\t// If true, the geometric normals will be compared for the normal rejection heuristic.\n\t// If false, smooth vertex normals (or normal map normals) will be compared\n\t//\n\t// Geometric normals are prefered as they are not disturbed by high details normal maps\n\tbool reject_using_geometric_normals;\n\n\tbool use_plane_distance_heuristic;\n\t// Threshold used when determining whether a temporal neighbor is acceptable\n\t// for temporal reuse regarding the spatial proximity of the neighbor and the current\n\t// point. \n\t// This is a world space distance.\n\tfloat plane_distance_threshold;\n\n\tbool use_roughness_similarity_heuristic;\n\t// How close the roughness of the neighbor's surface must be to ours to resample that neighbor\n\t// If this value is 0.25f for example, then the roughnesses must be within 0.25f of each other. 
Simple.\n\tfloat roughness_similarity_threshold;\n};\n\nstruct ReSTIRCommonSettings\n{\n\t// Settings for the temporal reuse pass\n\tReSTIRCommonTemporalPassSettings common_temporal_pass;\n\t// Settings for the spatial reuse pass\n\tReSTIRCommonSpatialPassSettings common_spatial_pass;\n\n\tReSTIRCommonNeighborSimiliaritySettings neighbor_similarity_settings;\n\n\t// When finalizing the reservoir in the spatial reuse pass, what value\n\t// to cap the reservoir's M value to.\n\t//\n\t// The point of this parameter is to avoid too much correlation between frames if using\n\t// a bias correction that uses confidence weights. Without M-capping, the M value of a reservoir\n\t// will keep growing exponentially through temporal and spatial reuse and when that exponentially\n\t// grown M value is used in confidence weights, it results in new samples being very unlikely \n\t// to be chosen which in turn results in non-convergence since always the same sample is evaluated\n\t// for a given pixel.\n\t//\n\t// An M-cap value between 5 - 30 is usually good\n\t//\n\t// 0 for infinite M-cap (don't...)\n\tint m_cap;\n\n\t// Whether or not to use confidence weights when resampling neighbors.\n\tbool use_confidence_weights;\n\n\t// Beta exponent to the difference function for symmetric and asymmetric ratio MIS weights\n\tfloat symmetric_ratio_mis_weights_beta_exponent = 2.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIR/ReSTIRDIDefaultSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_RESTIR_DI_DEFAULT_SETTINGS_H\n#define HOST_DEVICE_RESTIR_DI_DEFAULT_SETTINGS_H\n\n#include \"HostDeviceCommon/ReSTIR/ReSTIRCommonSettings.h\"\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIR/ReSTIRDISettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_RESTIR_DI_SETTINGS_H\n#define HOST_DEVICE_RESTIR_DI_SETTINGS_H\n\n#include \"HostDeviceCommon/ReSTIR/ReSTIRDIDefaultSettings.h\"\n\nstruct ReSTIRDIReservoir;\nstruct ReSTIRDIPresampledLight;\n\nstruct ReSTIRDIInitialCandidatesSettings\n{\n\t// How many light candidates to resamples during the initial candidates sampling pass\n\tint number_of_initial_light_candidates = 4;\n\t// How many BSDF candidates to resamples during the initial candidates sampling pass\n\tint number_of_initial_bsdf_candidates = 1;\n\t// For each 'number_of_initial_light_candidates', the probability that this light sample\n\t// will sample the envmap instead of a light in the scene\n\tfloat envmap_candidate_probability = 0.5f;\n\n\t// Buffer that contains the reservoirs that will hold the reservoir\n\t// for the initial candidates generated\n\tReSTIRDIReservoir* output_reservoirs = nullptr;\n};\n\nstruct ReSTIRDITemporalPassSettings\n{\n\t// The temporal reuse pass resamples the initial candidates as well as the last frame reservoirs which\n\t// are accessed through this pointer\n\tReSTIRDIReservoir* input_reservoirs = nullptr;\n\t// Buffer that holds the output of the temporal reuse pass\n\tReSTIRDIReservoir* output_reservoirs = nullptr;\n};\n\nstruct ReSTIRDISpatialPassSettings\n{\n\t// Buffer that contains the input reservoirs for the spatial reuse pass\n\tReSTIRDIReservoir* input_reservoirs = nullptr;\n\t// Buffer that contains the output reservoir of the spatial reuse pass\n\tReSTIRDIReservoir* output_reservoirs = nullptr;\n};\n\nstruct ReSTIRDILightPresamplingSettings\n{\n\t// From all the lights of the scene, how many subsets to presample\n\tint number_of_subsets = 128;\n\t// How many lights to presample in each subset\n\tint subset_size = 1024;\n\t// All threads in a tile_size * tile_size block of pixels will sample from 
the same subset of light samples\n\tint tile_size = 8;\n\n\t// Buffer for the presampled light samples\n\tReSTIRDIPresampledLight* light_samples;\n};\n\nstruct ReSTIRDISettings : public ReSTIRCommonSettings\n{\n\tHIPRT_HOST_DEVICE ReSTIRDISettings() \n\t{\n\t\tcommon_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\tcommon_temporal_pass.use_permutation_sampling = false;\n\t\tcommon_temporal_pass.permutation_sampling_random_bits = 42;\n\n\t\tcommon_temporal_pass.max_neighbor_search_count = 8;\n\t\tcommon_temporal_pass.neighbor_search_radius = 4;\n\n\t\tcommon_temporal_pass.temporal_buffer_clear_requested = false;\n\n\n\n\n\n\t\tcommon_spatial_pass.do_spatial_reuse_pass = true;\n\n\t\tcommon_spatial_pass.spatial_pass_index = 0;\n\t\tcommon_spatial_pass.number_of_passes = 1;\n\t\tcommon_spatial_pass.reuse_radius = 16;\n\t\tcommon_spatial_pass.reuse_neighbor_count = 5;\n\n\t\tcommon_spatial_pass.do_disocclusion_reuse_boost = false;\n\t\tcommon_spatial_pass.disocclusion_reuse_count = 5;\n\n\t\tcommon_spatial_pass.debug_neighbor_location = false;\n\t\tcommon_spatial_pass.debug_neighbor_location_direction = 0;\n\n\t\tcommon_spatial_pass.do_neighbor_rotation = false;\n\t\tcommon_spatial_pass.spatial_neighbors_rng_seed = 42;\n\t\tcommon_spatial_pass.coalesced_spatial_reuse = true;\n\t\tcommon_spatial_pass.use_adaptive_directional_spatial_reuse = false;\n\n\t\tcommon_spatial_pass.allow_converged_neighbors_reuse = false;\n\t\tcommon_spatial_pass.converged_neighbor_reuse_probability = 0.5f;\n\n\t\tcommon_spatial_pass.do_visibility_only_last_pass = true;\n\t\tcommon_spatial_pass.neighbor_visibility_count = common_spatial_pass.do_disocclusion_reuse_boost ? 
common_spatial_pass.disocclusion_reuse_count : common_spatial_pass.reuse_neighbor_count;\n\n\t\tcommon_spatial_pass.compute_spatial_reuse_hit_rate = false;\n\n\n\n\n\n\t\tneighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\tneighbor_similarity_settings.normal_similarity_angle_degrees = 37.5f;\n\t\tneighbor_similarity_settings.normal_similarity_angle_precomp = 0.906307787f;\n\t\tneighbor_similarity_settings.reject_using_geometric_normals = true;\n\n\t\tneighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\tneighbor_similarity_settings.plane_distance_threshold = 0.1f;\n\n\t\tneighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\tneighbor_similarity_settings.roughness_similarity_threshold = 0.25f;\n\n\n\n\n\n\t\tm_cap = 3;\n\t\tuse_confidence_weights = true;\n\t}\n\n\t// Settings for the initial candidates generation pass\n\tReSTIRDIInitialCandidatesSettings initial_candidates;\n\t// Settings for the temporal reuse pass\n\tReSTIRDITemporalPassSettings temporal_pass;\n\t// Settings for the spatial reuse pass\n\tReSTIRDISpatialPassSettings spatial_pass;\n\t// Settings for the light presampling pass\n\tReSTIRDILightPresamplingSettings light_presampling;\n\n\t// If true, the spatial and temporal pass will be fused into a single kernel call.\n\t// This avois a synchronization barrier between the temporal pass and the spatial pass\n\t// and increases performance.\n\t// Because the spatial must then resample without the output of the temporal pass, the spatial\n\t// pass only resamples on the temporal reservoir buffer, not the temporal + initial candidates reservoir\n\t// (which is the output of the temporal pass). 
This is usually imperceptible.\n\tbool do_fused_spatiotemporal = false;\n\n\t// Whether or not to trace a visibility ray when evaluating the final light sample produced by ReSTIR.\n\t// This is strongly biased but allows good performance.\n\tbool do_final_shading_visibility = true;\n\n\t// Pointer to the buffer that contains the output of all the passes of ReSTIR DI\n\t// This is the buffer that should be used when evaluating direct lighting in the path tracer\n\t// \n\t// This buffer isn't allocated but is actually just a pointer\n\t// to the buffer that was last used as the output of the resampling\n\t// passes last frame. \n\t// For example if there was spatial reuse in last frame, this buffer\n\t// is going to be a pointer to the output of the spatial reuse pass\n\t// If there was only temporal reuse pass last frame, this buffer is going\n\t// to be a pointer to the output of the temporal reuse pass\n\t// \n\t// This is handy to know which buffer the temporal reuse pass is going to use\n\t// as input on the next frame\n\tReSTIRDIReservoir* restir_output_reservoirs = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIR/ReSTIRGIDefaultSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_RESTIR_GI_DEFAULT_SETTINGS_H\n#define HOST_DEVICE_RESTIR_GI_DEFAULT_SETTINGS_H\n\n#include \"HostDeviceCommon/ReSTIR/ReSTIRCommonSettings.h\"\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIR/ReSTIRGISettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_RESTIR_GI_SETTINGS_H\n#define HOST_DEVICE_RESTIR_GI_SETTINGS_H\n\n#include \"HostDeviceCommon/ReSTIR/ReSTIRGIDefaultSettings.h\"\n\nstruct ReSTIRGIReservoir;\n\nstruct ReSTIRGIInitialCandidatesPassSettings\n{\n\t// Buffer that contains the reservoirs that will hold the reservoir\n\t// for the initial candidates generated\n\tReSTIRGIReservoir* initial_candidates_buffer = nullptr;\n};\n\nstruct ReSTIRGITemporalPassSettings\n{\n\t// Buffer that contains the input reservoirs for the temporal reuse pass\n\tReSTIRGIReservoir* input_reservoirs = nullptr;\n\t// Buffer that contains the output reservoir of the temporal reuse pass\n\tReSTIRGIReservoir* output_reservoirs = nullptr;\n};\n\nstruct ReSTIRGISpatialPassSettings\n{\n\t// Buffer that contains the input reservoirs for the spatial reuse pass\n\tReSTIRGIReservoir* input_reservoirs = nullptr;\n\t// Buffer that contains the output reservoir of the spatial reuse pass\n\tReSTIRGIReservoir* output_reservoirs = nullptr;\n};\n\nenum ReSTIRGIDebugView\n{\n\tNO_DEBUG = 0,\n\tFINAL_RESERVOIR_UCW = 1,\n\tTARGET_FUNCTION = 2,\n\tWEIGHT_SUM = 3,\n\tM_COUNT = 4,\n\tPER_PIXEL_REUSE_RADIUS = 5,\n\tPER_PIXEL_VALID_DIRECTIONS_PERCENTAGE = 6,\n};\n\nstruct ReSTIRGISettings : public ReSTIRCommonSettings\n{\n\tHIPRT_HOST_DEVICE ReSTIRGISettings() \n\t{\n\t\tcommon_temporal_pass.do_temporal_reuse_pass = true;\n\t\t\n\t\tcommon_temporal_pass.use_permutation_sampling = false;\n\t\tcommon_temporal_pass.permutation_sampling_random_bits = 42;\n\n\t\tcommon_temporal_pass.max_neighbor_search_count = 8;\n\t\tcommon_temporal_pass.neighbor_search_radius = 4;\n\n\t\tcommon_temporal_pass.temporal_buffer_clear_requested = false;\n\n\n\n\n\n\t\tcommon_spatial_pass.do_spatial_reuse_pass = true;\n\n\t\tcommon_spatial_pass.spatial_pass_index = 0;\n\t\tcommon_spatial_pass.number_of_passes = 
2;\n\t\tcommon_spatial_pass.reuse_radius = 20;\n\t\tcommon_spatial_pass.reuse_neighbor_count = 5;\n\n\t\tcommon_spatial_pass.do_disocclusion_reuse_boost = false;\n\t\tcommon_spatial_pass.disocclusion_reuse_count = 5;\n\n\t\tcommon_spatial_pass.debug_neighbor_location = false;\n\t\tcommon_spatial_pass.debug_neighbor_location_direction = 0;\n\n\t\tcommon_spatial_pass.do_neighbor_rotation = false;\n\t\tcommon_spatial_pass.spatial_neighbors_rng_seed = 42;\n\t\tcommon_spatial_pass.coalesced_spatial_reuse = false;\n\t\tcommon_spatial_pass.use_adaptive_directional_spatial_reuse = true;\n\n\t\tcommon_spatial_pass.allow_converged_neighbors_reuse = false;\n\t\tcommon_spatial_pass.converged_neighbor_reuse_probability = 0.5f;\n\n\t\tcommon_spatial_pass.do_visibility_only_last_pass = true;\n\t\tcommon_spatial_pass.neighbor_visibility_count = common_spatial_pass.do_disocclusion_reuse_boost ? common_spatial_pass.disocclusion_reuse_count : common_spatial_pass.reuse_neighbor_count;\n\n\t\tcommon_spatial_pass.compute_spatial_reuse_hit_rate = false;\n\n\n\n\n\t\tneighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\tneighbor_similarity_settings.normal_similarity_angle_degrees = 37.5f;\n\t\tneighbor_similarity_settings.normal_similarity_angle_precomp = 0.906307787f;\n\t\tneighbor_similarity_settings.reject_using_geometric_normals = true;\n\n\t\tneighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\tneighbor_similarity_settings.plane_distance_threshold = 0.1f;\n\n\t\tneighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\tneighbor_similarity_settings.roughness_similarity_threshold = 0.25f;\n\n\t\tuse_jacobian_rejection_heuristic = true;\n\t\tjacobian_rejection_threshold = 15.0f;\n\n\t\tuse_neighbor_sample_point_roughness_heuristic = true;\n\t\tneighbor_sample_point_roughness_threshold = 0.1f;\n\n\t\t// Very very small m-cap to avoid correlations\n\t\tm_cap = 1;\n\t\tuse_confidence_weights = true;\n\n\t\tdebug_view = 
ReSTIRGIDebugView::NO_DEBUG;\n\t\tdebug_view_scale_factor = 1.0f;\n\t}\n\n\tReSTIRGIInitialCandidatesPassSettings initial_candidates;\n\tReSTIRGITemporalPassSettings temporal_pass;\n\tReSTIRGISpatialPassSettings spatial_pass;\n\t\n\tReSTIRGIReservoir* restir_output_reservoirs = nullptr;\n\n\tReSTIRGIDebugView debug_view;\n\tfloat debug_view_scale_factor;\n\n\t// If a neighbor has its sample point on a glossy surface, we don't want to reuse\n\t// that sample with the reconnection shift if it is below a given roughness threshold because\n\t// the BSDF at the neighbor's glossy sample point is going to evaluate to 0 anyways if we change\n\t// its view direction\n\tbool use_neighbor_sample_point_roughness_heuristic;\n\tfloat neighbor_sample_point_roughness_threshold;\n\n\tbool use_jacobian_rejection_heuristic;\n\n\tHIPRT_HOST_DEVICE float get_jacobian_heuristic_threshold() const\n\t{\n\t\tif (use_jacobian_rejection_heuristic)\n\t\t\treturn jacobian_rejection_threshold;\n\t\telse\n\t\t\t// Returning a super high threshold so that neighbors are basically\n\t\t\t// never rejected based on their jacobian\n\t\t\treturn 1.0e20f;\n\t}\n\n\t/**\n\t * This function is used by ImGui to get a pointer to the private member\n\t */\n\tHIPRT_HOST_DEVICE float* get_jacobian_heuristic_threshold_pointer() { return &jacobian_rejection_threshold; }\n\n\tHIPRT_HOST_DEVICE void set_jacobian_heuristic_threshold(float new_threshold) { jacobian_rejection_threshold = new_threshold; }\n\nprivate:\n\tfloat jacobian_rejection_threshold;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/ReSTIRSettingsHelper.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RESTI_SETTINGS_HELPER_H\n#define HOST_DEVICE_COMMON_RESTI_SETTINGS_HELPER_H\n\n#include \"Device/includes/ReSTIR/Surface.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\ntemplate <bool IsReSTIRGI>\nstruct ReSTIRSettingsTypeTemplate {};\n\ntemplate <>\nstruct ReSTIRSettingsTypeTemplate<false>\n{\n\tusing Type = ReSTIRDISettings;\n};\n\ntemplate <>\nstruct ReSTIRSettingsTypeTemplate<true>\n{\n\tusing Type = ReSTIRGISettings;\n};\n\ntemplate <bool IsReSTIRGI>\nusing ReSTIRSettingsType = typename ReSTIRSettingsTypeTemplate<IsReSTIRGI>::Type;\n\n\nstruct ReSTIRSettingsHelper\n{\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static ReSTIRSettingsType<IsReSTIRGI> get_restir_settings(const HIPRTRenderData& render_data)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static ReSTIRCommonSpatialPassSettings get_restir_spatial_pass_settings(const HIPRTRenderData& render_data)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings.common_spatial_pass;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings.common_spatial_pass;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static ReSTIRCommonSpatialPassSettings& get_restir_spatial_pass_settings(HIPRTRenderData& render_data)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings.common_spatial_pass;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings.common_spatial_pass;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static ReSTIRCommonTemporalPassSettings get_restir_temporal_pass_settings(const HIPRTRenderData& render_data)\n\t{\n\t\tif 
constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings.common_temporal_pass;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings.common_temporal_pass;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static ReSTIRCommonNeighborSimiliaritySettings get_restir_neighbor_similarity_settings(const HIPRTRenderData& render_data)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings.neighbor_similarity_settings;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings.neighbor_similarity_settings;\n\t}\n\n\t/**\n\t * Returns the M value of a reservoir from the spatial pass input buffer given its pixel index\n\t *\n\t * The template argument can be used to select between ReSTIR DI and ReSTIR GI spatial buffers\n\t */\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static int get_restir_spatial_pass_input_reservoir_M(const HIPRTRenderData& render_data, int pixel_index)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn render_data.render_settings.restir_gi_settings.spatial_pass.input_reservoirs[pixel_index].M;\n\t\telse\n\t\t\treturn render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs[pixel_index].M;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static unsigned long long int get_spatial_reuse_direction_mask_ull(const HIPRTRenderData& render_data, int pixel_index)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t{\n#if ReSTIR_GI_SpatialDirectionalReuseBitCount > 32\n\t\t\treturn render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull[pixel_index];\n#else\n\t\t\treturn render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u[pixel_index];\n#endif\n\t\t}\n\t\telse\n\t\t{\n#if ReSTIR_DI_SpatialDirectionalReuseBitCount > 32\n\t\t\treturn 
render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull[pixel_index];\n#else\n\t\t\treturn render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u[pixel_index];\n#endif\n\t\t}\n\t}\n\n\t/**\n\t * Returns the shading normal or geometric normal of the given surface depending on the rejection heuristics settings\n\t */\n\ttemplate <bool IsReSTIRGI>\n\tHIPRT_HOST_DEVICE static float3 get_normal_for_rejection_heuristic(const HIPRTRenderData& render_data, const ReSTIRSurface& surface)\n\t{\n\t\tif constexpr (IsReSTIRGI)\n\t\t{\n\t\t\tif (render_data.render_settings.restir_gi_settings.neighbor_similarity_settings.reject_using_geometric_normals)\n\t\t\t\treturn surface.geometric_normal;\n\t\t\telse\n\t\t\t\treturn surface.shading_normal;\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (render_data.render_settings.restir_di_settings.neighbor_similarity_settings.reject_using_geometric_normals)\n\t\t\t\treturn surface.geometric_normal;\n\t\t\telse\n\t\t\t\treturn surface.shading_normal;\n\t\t}\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/RenderBuffers.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RENDER_BUFFERS_H\n#define HOST_DEVICE_COMMON_RENDER_BUFFERS_H\n\n#include \"Device/includes/AliasTable.h\"\n#include \"Device/includes/GMoN/GMoNDevice.h\"\n\n#include \"HostDeviceCommon/Material/MaterialPackedSoA.h\"\n#include \"HostDeviceCommon/PrecomputedEmissiveTrianglesDataSoADevice.h\"\n\nstruct RenderBuffers\n{\n\t// Sum of samples color per pixel. Should not be\n\t// pre-divided by the number of samples i.e. this buffer\n\t// contains pure accumulation of pixel colors\n\tColorRGB32F* accumulated_ray_colors = nullptr;\n\n\t// Data for the GMoN estimator\n\tGMoNDevice gmon_estimator;\n\n\t// A device pointer to the buffer of triangles vertex indices\n\t// triangles_indices[0], triangles_indices[1] and triangles_indices[2]\n\t// represent the indices of the vertices of the first triangle for example\n\tint* triangles_indices = nullptr;\n\t// A device pointer to the buffer of triangle vertices positions\n\tfloat3* vertices_positions = nullptr;\n\t// A device pointer to a buffer filled with 0s and 1s that\n\t// indicates whether or not a vertex normal is available for\n\t// the given vertex index\n\tunsigned char* has_vertex_normals = nullptr;\n\t// The smooth normal at each vertex of the scene\n\t// Needs to be indexed by a vertex index\n\tfloat3* vertex_normals = nullptr;\n\t// Texture coordinates at each vertices\n\tfloat2* texcoords = nullptr;\n\t// Precomputed areas of all triangles of the scene\n\tfloat* triangles_areas = nullptr;\n\t// For each emissive triangle of the scene, this buffer contains the vertex A of the triangle\n\t// as well as AB and AC edges. 
This is useful for sampling a point on a triangle without having\n\t// to go through the usual\n\t//\n\t// float3 vertex_A = vertices_positions[triangles_indices[triangle_index * 3 + 0]];\n\t// float3 vertex_B = vertices_positions[triangles_indices[triangle_index * 3 + 1]];\n\t// float3 vertex_C = vertices_positions[triangles_indices[triangle_index * 3 + 2]];\n\t//\n\t// indirect fetch code which is expensive on the GPU because of the pointer chasing\n\t// \n\t// This is a remnant of some tests and it was actually more expensive than the indirect fetch code\n\t// above.\n\t// PrecomputedEmissiveTrianglesDataSoADevice precomputed_emissive_triangles_data;\n\n\t// Index of the material used by each triangle of the scene\n\tint* material_indices = nullptr;\n\t// Materials array to be indexed by an index retrieved from the \n\t// material_indices array\n\tDevicePackedTexturedMaterialSoA materials_buffer;\n\t// A buffer that can be indexed by a material_id.\n\t// \n\t// If indexing this buffer returns true, then the material is fully opaque\n\t// and there is no need to test alpha testing for it\n\t//\n\t// This is actually a buffer of bools but manipulating bools is annoying so this\n\t// is unsigned char. 
But the value of the unsigned char is either 0 or 1\n\tunsigned char* material_opaque = nullptr;\n\n\tint emissive_triangles_count = 0;\n\t// A buffer that contains the primitive indices of the emissive triangles of the scene\n\t// Does not contain the indices of the emissive triangles that have emissive textures\n\tint* emissive_triangles_primitive_indices = nullptr;\n\t// Same as 'emissive_triangles_primitive_indices' but does contain the indices of the emissive triangles\n\t// that have emissive textures\n\tint* emissive_triangles_primitive_indices_and_emissive_textures = nullptr;\n\t// Alias table for sampling emissive lights according to power\n\tDeviceAliasTable emissives_power_alias_table;\n\n\t// A pointer either to an array of Image8Bit or to an array of\n\t// oroTextureObject_t whether if CPU or GPU rendering respectively\n\t// This pointer can be cast for the textures to be retrieved.\n\tvoid* material_textures = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/RenderData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RENDER_DATA_H\n#define HOST_DEVICE_COMMON_RENDER_DATA_H\n\n#include \"Device/includes/GBufferDevice.h\"\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n#include \"Device/includes/NEE++/NEE++.h\"\n\n#include \"HostDeviceCommon/BSDFsData.h\"\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"HostDeviceCommon/Math.h\"\n#include \"HostDeviceCommon/RenderBuffers.h\"\n#include \"HostDeviceCommon/RenderSettings.h\"\n#include \"HostDeviceCommon/WorldSettings.h\"\n\n#include <hiprt/hiprt_device.h>\n#include <Orochi/Orochi.h>\n\nstruct AuxiliaryBuffers\n{\n\t// Whether or not the pixel at a given index in the buffer is active or not. \n\t// \n\t// A pixel can be inactive when we're rendering at low resolution for example\n\t// (and so some pixels are not rendered) or when adaptive sampling has\n\t// judged that the pixel was converged enough and doesn't need more samples\n\tunsigned char* pixel_active = nullptr;\n\n\t// World space normals for the denoiser\n\t// These normals should already be divided by the number of samples\n\tfloat3* denoiser_normals = nullptr;\n\n\t// Albedo for the denoiser\n\t// The albedo should already be divided by the number of samples\n\tColorRGB32F* denoiser_albedo = nullptr;\n\n\t// Per pixel sample count. Useful when doing adaptive sampling\n\t// where each pixel can have a different number of sample\n\tint* pixel_sample_count = nullptr;\n\n\t// Per pixel sum of squared luminance of samples. 
Used for adaptive sampling\n\t// This buffer should not be pre-divided by the number of samples\n\tfloat* pixel_squared_luminance = nullptr;\n\n\t// If a given pixel has converged, this buffer contains the number of samples\n\t// that were necessary for the convergence.\n\t// \n\t// If the pixel hasn't converged yet, the buffer contains the -1 value for that pixel\n\tint* pixel_converged_sample_count = nullptr;\n\n\t// A single boolean (contained in a buffer, hence the pointer) \n\t// to indicate whether at least one single ray is still active in the kernel.\n\t// This is an unsigned char instead of a boolean because std::vector<bool>.data()\n\t// isn't standard\n\tunsigned char* still_one_ray_active = nullptr;\n\n\t// If render_settings.stop_pixel_noise_threshold > 0.0f, this buffer\n\t// (consisting of a single unsigned int) counts how many pixels have reached the\n\t// noise threshold. If this value is equal to the number of pixels of the\n\t// framebuffer, then all pixels have converged according to the given\n\t// noise threshold.\n\tAtomicType<unsigned int>* pixel_count_converged_so_far = nullptr;\n\n\t// Same for ReSTIR GI\n\tReSTIRGIReservoir* restir_gi_reservoir_buffer_1 = nullptr;\n\tReSTIRGIReservoir* restir_gi_reservoir_buffer_2 = nullptr;\n\tReSTIRGIReservoir* restir_gi_reservoir_buffer_3 = nullptr;\n};\n\n/**\n * The CPU and GPU use the same kernel code but the CPU still need some specific data\n * (the CPU BVH for example) which is stored in this structure\n */\n\nclass BVH;\nstruct CPUData\n{\n\t// BVH built over all the triangles of the scene\n\tBVH* bvh = nullptr;\n\n\t// BVH built over the emissive triangles of the scene only\n\tBVH* light_bvh = nullptr;\n};\n\n/*\n * A structure containing all the information about the scene\n * that the kernel is going to need for the render (vertices of the triangles, \n * vertices indices, skysphere data, ...)\n */\nstruct HIPRTRenderData\n{\n\t// Random number that is updated by the CPU and that can help 
generate a\n\t// random seed on the GPU for the random number generator to get started\n\tunsigned int random_number = 42;\n\n\t// HIPRT BVH built over all the triangles of the scene\n\thiprtGeometry GPU_BVH = nullptr;\n\t// HIPRT BVH built over the emissive triangles of the scene only\n\thiprtGeometry light_GPU_BVH = nullptr;\n\t// GPU Intersection functions (for alpha testing for example)\n\thiprtFuncTable hiprt_function_table = nullptr;\n\n\t// Size of the *global* stack per thread. Default is 32.\n\tint global_traversal_stack_buffer_size = 32;\n\thiprtGlobalStackBuffer global_traversal_stack_buffer = { 0, 0, nullptr };\n\n\tRenderBuffers buffers;\n\tBRDFsData bsdfs_data;\n\tAuxiliaryBuffers aux_buffers;\n\tGBufferDevice g_buffer;\n\tGBufferDevice g_buffer_prev_frame;\n\n\tHIPRTRenderSettings render_settings;\n\tWorldSettings world_settings;\n\n\t// Data for NEE++\n\tNEEPlusPlusDevice nee_plus_plus;\n\n\t// Camera for the current frame\n\tHIPRTCamera current_camera;\n\t// Camera of the last frame\n\tHIPRTCamera prev_camera;\n\n\t// Data only used by the CPU\n\tCPUData cpu_only;\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/RenderSettings.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"HostDeviceCommon/RenderSettings.h\"\n#include \"Renderer/GPURenderer.h\"\n\n#ifndef __KERNELCC__\nHIPRT_HOST bool HIPRTRenderSettings::use_prev_frame_g_buffer(GPURenderer* renderer) const\n{\n\t// If ReSTIR DI isn't used, we don't need the last frame's g-buffer\n\t// (as far as the codebase goes at the time of writing this function anyways)\n\tbool need_g_buffer = false;\n\tneed_g_buffer |= renderer->get_ReSTIR_DI_render_pass()->is_render_pass_used() && restir_di_settings.common_temporal_pass.do_temporal_reuse_pass;\n\tneed_g_buffer |= renderer->get_ReSTIR_GI_render_pass()->is_render_pass_used() && restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass;\n\n\treturn need_g_buffer;\n}\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/RenderSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_RENDER_SETTINGS_H\n#define HOST_DEVICE_COMMON_RENDER_SETTINGS_H\n\n#include \"Device/includes/ReSTIR/ReGIR/Settings.h\"\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n\n#include \"HostDeviceCommon/PathRussianRoulette.h\"\n#include \"HostDeviceCommon/KernelOptions/KernelOptions.h\"\n#include \"HostDeviceCommon/RIS/RISSettings.h\"\n#include \"HostDeviceCommon/ReSTIR/ReSTIRCommonSettings.h\"\n#include \"HostDeviceCommon/ReSTIR/ReSTIRDISettings.h\"\n#include \"HostDeviceCommon/ReSTIR/ReSTIRGISettings.h\"\n#include \"HostDeviceCommon/Math.h\"\n\n// Just used for initializing some structure members below\n#define local_min_macro(a, b) ((a) < (b) ? (a) : (b))\n\nclass GPURenderer;\n\nstruct HIPRTRenderSettings\n{\n\tint2 render_resolution = make_int2(1280, 720);\n\n\t// If true, the camera ray kernel will reset all buffers to their default values.\n\t// This is mainly useful for the first frame of the render\n\tbool need_to_reset = true;\n\n\n\t// TODO DEBUG REMOVE THESE\n\t////////////////////////////////////////////////////\n\n\tstatic constexpr bool DEBUG_DEV_GMON_BLEND_WEIGHTS = true;\n\tbool DEBUG_gmon_auto_blending_weights = false;\n\tfloat DEBUG_GMON_DIVIDER = 3.0f;\n\tint DEBUG_GMON_WINDOW_SIZE = 3;\n\n\tint DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS = 4;\n\tint DEBUG_REGIR_PRE_INTEGRATION_SAMPLE_COUNT_PER_RESERVOIR = 32;\n\n\tbool enable_direct = true;\n\tAtomicType<unsigned long long int>* DEBUG_SUM_COUNT = nullptr;\n\tAtomicType<unsigned long long int>* DEBUG_SUM_TOTAL = nullptr;\n\n\t////////////////////////////////////////////////////\n\n\t// If true, then the kernels are allowed to modify the status buffers (how many pixels have converged so far, ...)\n\t// \n\t// Why is this useful?\n\t// There is a \"status\" buffer that contains the 
number of pixels that have converged for a kernel launch.\n\t// It is a simple counter that threads of the kernel increment if the pixel corresponding to the thread has converged.\n\t// Because thread keep incrementing this counter, we need to reset it to 0 before each kernel launch.\n\t// \n\t// To simulate multiple samples per frame and reduce CPU overhead, we can launch multiple times the kernels per frame.\n\t// We would thus need to reset the status buffer before each kernel launch but this is a synchronous operation which then\n\t// slows down the UI. This means that we cannot reset the status buffer before each kernel launch, we can only reset it\n\t// at each frame before GPURenderer::render() is called.\n\t// \n\t// In the case where we have 5 samples per pixel for example, we would have each kernel launch increment the status\n\t// buffer and that would largely go above 100% of pixels converged (which doesn't make sense). \n\t// What we do instead is that we only allow the last kernel launch of the frame to increment the status buffers.\n\t//\n\t// This is the variable that enables / disables the increment of status buffers\n\tbool do_update_status_buffers = false;\n\n\t// Whether or not to accumulate each frame to allow progressive rendering. If false,\n\t// each frame will be displayed on screen and discarded on the next frame without accumulation\n\tbool accumulate = true;\n\n\t// How many samples were accumulated in the denoiser's AOV buffers (albedo & normals)\n\t// This is used mainly for the normals AOVs because we want a way to accumulate the normals.\n\t// However, we still want to feed the normalized normals to the denoiser. \n\t// This means that we need to store normalized normals in the normals AOV GPU buffer. \n\t// But if we also want to accumulate, we also need to get the normals back from \"normalized\"\n\t// to their \"accumulated\" value. 
We can then add the normal of the first hit of our current\n\t// frame to that \"accumulated\" value and then normalize again.\n\t// \n\t// We need denoiser_AOV_accumulation_counter to multiply the normalized normals of the buffer with\n\t// and get that \"accumulated\" normals value.\n\tint denoiser_AOV_accumulation_counter = 0;\n\n\t// Number of samples rendered so far before (before means that this counter starts at 0) the kernel call\n\t// \n\t// This is the sum of samples_per_frame for all frames that have been rendered.\n\tunsigned int sample_number = 0;\n\t// See the DisplayOnlySampleN kernel option\n\tint output_debug_sample_N = 1;\n\n\t// How many samples to compute per pixel per frame\n\t// Higher values reduce CPU overhead since the GPU spends\n\t// more time computing per frame but reduces interactivity\n\tint samples_per_frame = 1;\n\t// Maximum number of bounces of rays in the scene. \n\t// 1 is direct light only.\n\tint nb_bounces = 5;\n\n\tbool do_russian_roulette = true;\n\t// After how many bounces can russian roulette kick in?\n\t// 0 means that the camera ray hits, and then the next bounce\n\t// is already susceptible to being terminated by russian roulette\n\tint russian_roulette_min_depth = local_min_macro(5, nb_bounces / 2);\n\t// After applying russian roulette(dividing by the continuation probability)\n\t// the energy added to the ray throughput is clamped to this maximum value.\n\t// \n\t// This is biased and darkens the image the lower the threshold but it helps\n\t// reduce variance and fireflies introduced by the russian roulette --> faster\n\t// convergence.\n\t//\n\t// 0 for no clamping.\n\tfloat russian_roulette_throughput_clamp = 10.0f;\n\n\t// What Russian roulette method to use to determine the path termination\n\t// probability\n\tPathRussianRoulette path_russian_roulette_method = PathRussianRoulette::MAX_THROUGHPUT;\n\n\t// Whether or not to \"freeze\" random number generation so that each frame uses\n\t// exactly the same 
random number. This allows every ray to follow the exact\n\t// same path every frame, allowing for more stable benchmarking.\n\tint freeze_random = false;\n\n\t// If true, NaNs encountered during rendering will be rendered as very bright pink. \n\t// Useful for debugging only.\n\tbool display_NaNs = true;\n\n\t// If true, then rendering at low resolution will be performed if 'wants_render_low_resolution'\n\t// is also true.\n\t// This boolean basically is an additional condition for rendering at low resolution:\n\t//\t - If we're interacting with the camera, we *want* to render at low resolution\n\t//\t but if rendering at low resolution is not allowed (this boolean), then we will still\n\t//\t not render at low resolution\n\t// This boolean is controlled by the user in Imgui\n\tbool allow_render_low_resolution = false;\n\t// If true, this means that the user is moving the camera and we want to\n\t// render the image at a much lower resolution to allow for smoother\n\t// interaction. Having this flag at true isn't sufficient for rendering at low\n\t// resolution. The user must also *allow* rendering at low resolution\n\t// with the 'allow_render_low_resolution' flag\n\tbool wants_render_low_resolution = false;\n\t// How to divide the render resolution by when rendering at low resolution\n\t// (when interacting with the camera)\n\tint render_low_resolution_scaling = 2;\n\n\tbool enable_adaptive_sampling = false;\n\t// How many samples before the adaptive sampling actually kicks in.\n\t// This is useful mainly for the per-pixel adaptive sampling method\n\t// where you want to be sure that each pixel in the image has had enough\n\t// chance find a path to a potentially \n\tint adaptive_sampling_min_samples = 96;\n\t// Adaptive sampling noise threshold\n\tfloat adaptive_sampling_noise_threshold = 0.075f;\n\n\t// If true, the rendering will stop after a certain proportion (defined by 'stop_pixel_percentage_converged')\n\t// of pixels of the image have converged. 
\"converged\" here is defined according to the adaptive sampling if\n\t// enabled or according to 'stop_pixel_noise_threshold' if adaptive sampling is not enabled.\n\t//\n\t// If false, the render will not stop until all pixels have converged\n\tbool use_pixel_stop_noise_threshold = true;\n\t// A percentage in [0, 100] that dictates the proportion of pixels that must\n\t// have reached the given noise threshold (stop_pixel_noise_threshold\n\t// variable) before we stop rendering.\n\t// \n\t// For example, if this variable is 90, we will stop rendering when 90% of all\n\t// pixels have reached the stop_pixel_noise_threshold\n\tfloat stop_pixel_percentage_converged = 70.0f;\n\t// Noise threshold for use with the stop_pixel_percentage_converged stopping\n\t// condition\n\tfloat stop_pixel_noise_threshold = 0.075f;\n\n\n\n\t// Clamp direct lighting contribution to reduce fireflies\n\tfloat direct_contribution_clamp = 0.0f;\n\t// Clamp envmap contribution to reduce fireflies\n\tfloat envmap_contribution_clamp = 0.0f;\n\t// Clamp indirect lighting contribution to reduce fireflies\n\tfloat indirect_contribution_clamp = 0.0f;\n\n\t// If a selected light (for direct lighting estimation) contributes at a given\n\t// point less than this 'minimum_light_contribution' value then the light sample is discarded\n\t// 0.0f to disable\n\tfloat minimum_light_contribution = 0.0f;\n\n\t// Whether or not to do alpha testing for geometry with transparent base color textures\n\tbool do_alpha_testing = true;\n\t// At what bounce to stop doing alpha testing\n\t// \n\t// A value of 0 means that alpha testing isn't done at bounce 0 which means that even camera\n\t// rays do not do alpha testing --> alpha testing is disable\n\t// \n\t// A value of 1 means that camera rays do alpha testing but the next bounce rays do not do alpha\n\t// testing\n\t//\n\t// Shadow rays for NEE are also affected by this setting\n\tint alpha_testing_indirect_bounce = nb_bounces + 1;\n\n\t// Whether or not to do 
normal mapping at all\n\t// If false, geometric normals will always be used\n\tbool do_normal_mapping = true;\n\n\t// Settings for RIS (direct light sampling)\n\tRISSettings ris_settings;\n\n\t// Settings for ReSTIR DI\n\tReSTIRDISettings restir_di_settings;\n\t// Settings for ReSTIR GI\n\tReSTIRGISettings restir_gi_settings;\n\n\tReGIRSettings regir_settings;\n\n\t/**\n\t * Returns true if the current frame should be renderer at low resolution, false otherwise.\n\t * \n\t * This function is a simple helper that combines a few flags to make sure that we\n\t * actually want to render at low resolution\n\t */\n\tHIPRT_HOST_DEVICE bool do_render_low_resolution() const\n\t{\n\t\treturn wants_render_low_resolution && allow_render_low_resolution && accumulate;\n\t}\n\n\t/**\n\t * Returns true if the adaptive sampling buffers are ready for use, false otherwise.\n\t *\n\t * Adaptive sampling buffers are \"ready for use\" if the adaptive sampling is enabled or\n\t * if the pixel stop noise threshold is enabled. Otherwise, the adaptive sampling buffers\n\t * are freed to save VRAM so they cannot be used.\n\t */\n\tHIPRT_HOST_DEVICE bool has_access_to_adaptive_sampling_buffers() const\n\t{\n\t\tbool has_access = false;\n\n\t\thas_access |= stop_pixel_noise_threshold > 0.0f;\n\t\thas_access |= enable_adaptive_sampling;\n\t\t// Cannot use adaptive sampling without accumulation\n\t\thas_access &= accumulate;\n\n\t\treturn has_access;\n\t}\n\n\t/**\n\t * Returns true if the renderer needs the G-buffer of the previous frame.\n\t * \n\t * The boolean parameter is some additional condition that must be satisfied\n\t * for the G-buffer to be needed\n\t * \n\t * We need two overrides of this function: one for use in the shaders and one \n\t * for use in the C++ CPU side code.\n\t * \n\t * This is because to determine whether or not we need the g-buffer of last\n\t * frame, we need to check if ReSTIR DI is being used or not. 
On the CPP side, this\n\t * can be done with the GPURenderer instance by checking the path tracer\n\t * options and check if the DirectLightSamplingStrategy is equal to\n\t * LSS_RESTIR_DI. On the device however, we don't have access to the\n\t * GPURenderer instance but instead, we can check directly using the \n\t * DirectLightSamplingStrategy macro (and we don't want the GPURenderer parameter \n\t * because that doesn't exist on the device).\n\t */\n\tHIPRT_DEVICE bool use_prev_frame_g_buffer() const\n\t{\n\t\t// If ReSTIR DI isn't used, we don't need the last frame's g-buffer\n\t\t// (as far as the codebase goes at the time of writing this function anyways)\n\t\tbool need_g_buffer = false;\n\t\tneed_g_buffer |= DirectLightSamplingStrategy == LSS_RESTIR_DI && restir_di_settings.common_temporal_pass.do_temporal_reuse_pass;\n\t\tneed_g_buffer |= PathSamplingStrategy == PSS_RESTIR_GI && restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass;\n\n\t\treturn need_g_buffer;\n\t}\n\n\t// Only need this one on the host\n#ifndef __KERNELCC__\n\tHIPRT_HOST bool use_prev_frame_g_buffer(GPURenderer* renderer) const;\n#endif\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/WorldSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_WORLD_SETTINGS_H\n#define HOST_DEVICE_COMMON_WORLD_SETTINGS_H\n\n#include \"Device/includes/AliasTable.h\"\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Packing.h\"\n\nenum AmbientLightType\n{\n\tNONE,\n\tUNIFORM,\n\tENVMAP\n};\n\nstruct WorldSettings\n{\n\tAmbientLightType ambient_light_type = AmbientLightType::UNIFORM;\n\tColorRGB32F uniform_light_color = ColorRGB32F(0.0f);\n\n\t// Width and height in pixels. Both in the range [1, XXX]\n\tunsigned int envmap_width = 0, envmap_height = 0;\n\t// Simple scale multiplier on the envmap color read from the envmap texture\n\t// in the shader\n\tfloat envmap_intensity = 1.0f;\n\t// If true, the background of the scene (where rays directly miss any geometry\n\t// and we directly see the skysphere) will scale with the envmap_intensity coefficient.\n\t// This can be visually unpleasing because the background will most likely\n\t// become completely white and blown out.\n\tint envmap_scale_background_intensity = false;\n\n\t// Packed RGBE 9/9/9/5 envmap texels\n\tRGBE9995Packed* envmap;\n\n\t// Luminance sum of all the texels of the envmap\n\tfloat envmap_total_sum = 0.0f;\n\n\t// Cumulative distribution function. 1D float array of length width * height for\n\t// importance sampling the envmap with a binary search strategy\n\tfloat* envmap_cdf = nullptr;\n\n\t// Probabilities and aliases for sampling the envmap with the alias table strategy\n\tDeviceAliasTable envmap_alias_table;\n\n\t// Rotation matrix for rotating the envmap around in the current frame\n\tfloat3x3 envmap_to_world_matrix = float3x3{ { {1.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 0.0f, 1.0f} } };\n\tfloat3x3 world_to_envmap_matrix = float3x3{ { {1.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 0.0f, 1.0f} } };\n};\n\n#endif\n"
  },
  {
    "path": "src/HostDeviceCommon/Xorshift.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_XORSHIFT_H\n#define HOST_DEVICE_COMMON_XORSHIFT_H\n\n#include <hiprt/hiprt_device.h>\n\n#include \"HostDeviceCommon/Math.h\"\n\nstruct Xorshift32State \n{\n    unsigned int seed = 42;\n};\n\nstruct Xorshift32Generator\n{\n    static const unsigned int XORSHIFT_MAX = 0xffffffff;\n\n    HIPRT_DEVICE Xorshift32Generator()\n    {\n        m_state.seed = 42;\n    }\n\n    HIPRT_DEVICE Xorshift32Generator(unsigned int seed)\n    {\n        m_state.seed = seed;\n    }\n\n    /*\n     * Returns a uniform random number between 0 and\n     * array_size - 1 (included)\n     */\n    HIPRT_DEVICE int random_index(int array_size)\n    {\n        int random_num = xorshift32() / static_cast<float>(XORSHIFT_MAX) * array_size;\n        return hippt::min(random_num, array_size - 1);\n    }\n\n    /*\n     * Returns a float int [0, 1.0 - 1.0e-9f]\n     */\n    HIPRT_DEVICE float operator()()\n    {\n        //Float in [0, 1[\n        float a = xorshift32() / static_cast<float>(XORSHIFT_MAX);\n        return hippt::min(a, 1.0f - 1.0e-7f);\n    }\n\n    /**\n     * Returns a random uint\n     */\n    HIPRT_DEVICE unsigned int xorshift32()\n    {\n        /* Algorithm \"xor\" from p. 4 of Marsaglia, \"Xorshift RNGs\" */\n        unsigned int x = m_state.seed;\n        x ^= x << 13;\n        x ^= x >> 17;\n        x ^= x << 5;\n        return m_state.seed = x;\n    }\n\n    Xorshift32State m_state;\n};\n\n#endif\n"
  },
  {
    "path": "src/Image/EnvmapRGBE9995.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DEVICE_RGBE9995_ENVMAP_H\n#define DEVICE_RGBE9995_ENVMAP_H\n\n#include \"HostDeviceCommon/Packing.h\"\n#include \"Image/Image.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n\n/**\n * If GPU is true, then functions of this class will be templated such\n * that they compute / return data that can be used on the GPU\n */\ntemplate <bool GPU>\nclass RGBE9995Envmap\n{\npublic:\n\tHIPRT_HOST void pack_from(const Image32Bit& image)\n\t{\n\t\tpacked_data_CPU.resize(image.width * image.height);\n\n#pragma omp parallel for\n\t\tfor (int y = 0; y < image.height; y++)\n\t\t{\n\t\t\tfor (int x = 0; x < image.width; x++)\n\t\t\t{\n\t\t\t\tint index = x + y * image.width;\n\n\t\t\t\tpacked_data_CPU[index].pack(image.get_pixel_ColorRGB32F(index));\n\t\t\t}\n\t\t}\n\n\t\tif (GPU)\n\t\t{\n\t\t\t// If the data is for the GPU, upload the data to the GPU buffer and then discard the CPU data\n\n\t\t\tpacked_data_GPU.resize(image.width * image.height);\n\t\t\tpacked_data_GPU.upload_data(packed_data_CPU);\n\n\t\t\t// Clearing the CPU data\n\t\t\tpacked_data_CPU = std::vector<RGBE9995Packed>();\n\t\t}\n\t}\n\n\tHIPRT_HOST RGBE9995Packed* get_data_pointer()\n\t{\n\t\tif (GPU)\n\t\t\treturn packed_data_GPU.get_device_pointer();\n\t\telse\n\t\t\treturn packed_data_CPU.data();\n\t}\n\nprivate:\n\t// Linear array for the packed data of the envmap\n\tOrochiBuffer<RGBE9995Packed> packed_data_GPU;\n\n\tstd::vector<RGBE9995Packed> packed_data_CPU;\n};\n\n#endif\n"
  },
  {
    "path": "src/Image/Image.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Image/Image.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\n// This CPP file is used to define the STBI implementation once and for all\n#define STB_IMAGE_IMPLEMENTATION\n#define STB_IMAGE_WRITE_IMPLEMENTATION\n\n#include \"stb_image.h\"\n#include \"stb_image_write.h\"\n\n#include \"tinyexr.cc\"\n\n#include <deque>\n\nImage8Bit::Image8Bit(int width, int height, int channels) : Image8Bit(std::vector<unsigned char>(width * height * channels, 0), width, height, channels) {}\n\nImage8Bit::Image8Bit(const unsigned char* data, int width, int height, int channels) : width(width), height(height), channels(channels)\n{\n    m_pixel_data = std::vector<unsigned char>();\n    m_pixel_data.insert(m_pixel_data.end(), &data[0], &data[width * height * channels]);\n}\n\nImage8Bit::Image8Bit(const std::vector<unsigned char>& data, int width, int height, int channels) : width(width), height(height), channels(channels), m_pixel_data(data) {}\n\nImage8Bit Image8Bit::read_image(const std::string& filepath, int output_channels, bool flipY)\n{\n    stbi_set_flip_vertically_on_load_thread(flipY);\n\n    int width, height, read_channels;\n    unsigned char* pixels = stbi_load(filepath.c_str(), &width, &height, &read_channels, output_channels);\n\n    if (!pixels)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error reading image %s: %s\", filepath.c_str(), stbi_failure_reason());\n        return Image8Bit();\n    }\n\n    Image8Bit output_image(width, height, output_channels);\n#pragma omp parallel for\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = x + y * width;\n\n            for (int i = 0; i < output_channels; i++)\n                output_image[index * 
output_channels + i] = pixels[index * output_channels + i];\n        }\n    }\n\n    stbi_image_free(pixels);\n    return output_image;\n}\n\nImage8Bit Image8Bit::read_image_hdr(const std::string& filepath, int output_channels, bool flipY)\n{\n    stbi_set_flip_vertically_on_load(flipY);\n\n    int width, height, read_channels;\n    float* pixels = stbi_loadf(filepath.c_str(), &width, &height, &read_channels, output_channels);\n\n    if (!pixels)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error reading image %s: %s\", filepath.c_str(), stbi_failure_reason());\n        return Image8Bit();\n    }\n\n    std::vector<unsigned char> converted_data(width * height * output_channels);\n#pragma omp parallel for\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = x + y * width;\n\n            for (int i = 0; i < output_channels; i++)\n            {\n                converted_data[index * output_channels + i] = static_cast<unsigned char>(pixels[index * output_channels + i]);\n            }\n        }\n    }\n\n    stbi_image_free(pixels);\n    return Image8Bit(converted_data, width, height, output_channels);\n}\n\nbool Image8Bit::write_image_png(const char* filename, const bool flipY) const\n{\n    if (byte_size() == 0)\n        return false;\n\n    std::vector<unsigned char> tmp(width * height * channels);\n    for (unsigned i = 0; i < width * height; i++)\n        for (int j = 0; j < channels; j++)\n            tmp[i * channels + j] = hippt::clamp(static_cast<unsigned char>(0), static_cast<unsigned char>(255), m_pixel_data[i * channels + j]);\n\n    stbi_flip_vertically_on_write(flipY);\n    return stbi_write_png(filename, width, height, channels, tmp.data(), width * channels) != 0;\n}\n\nbool Image8Bit::write_image_hdr(const char* filename, const bool flipY) const\n{\n    if (byte_size() == 0)\n        return false;\n\n    std::vector<float> tmp(width * height * 
channels);\n    for (unsigned i = 0; i < width * height; i++)\n        for (int j = 0; j < channels; j++)\n            tmp[i * channels + j] = m_pixel_data[i * channels + j] / 255.0f;\n\n    stbi_flip_vertically_on_write(flipY);\n    return stbi_write_hdr(filename, width, height, channels, reinterpret_cast<const float*>(m_pixel_data.data())) != 0;\n}\n\nfloat Image8Bit::luminance_of_pixel(int x, int y) const\n{\n    int start_pixel = (x + y * width) * channels;\n\n    // Computing the luminance with a *maximum* of 3 components.\n    // \n    // If the texture only has one component (i.e. only red), the following\n    // loop will only loop through the red component with the right weight. \n    // \n    // If the image has more than 1 components, 3 for example, then we'll loop through\n    // the 3 components and apply the weights.\n    // \n    // If the image has 4 components, we will still only take RGB into account for the\n    // luminance computation but not alpha\n    float luminance = 0.0f;\n    float weights[3] = { 0.3086f, 0.6094f, 0.0820f };\n    for (int i = 0; i < hippt::min(channels, 3); i++)\n        luminance += m_pixel_data[start_pixel + i] * weights[i];\n\n    return luminance;\n}\n\nfloat Image8Bit::luminance_of_area(int start_x, int start_y, int stop_x, int stop_y) const\n{\n    float luminance = 0.0f;\n\n    for (int x = start_x; x < stop_x; x++)\n        for (int y = start_y; y < stop_y; y++)\n            luminance += luminance_of_pixel(x, y);\n\n    return luminance;\n}\n\nfloat Image8Bit::luminance_of_area(const ImageBin& area) const\n{\n    return luminance_of_area(area.x0, area.y0, area.x1, area.y1);\n}\n\nColorRGBA32F Image8Bit::sample_rgba32f(float2 uv) const\n{\n    // Sampling in repeat mode so we're just keeping the fractional part\n    float u = uv.x;\n    if (u != 1.0f)\n        // Only doing that if u != 1.0f because if we actually have\n        // uv.x == 1.0f, then subtracting static_cast<int>(uv.x) will\n        // give us 0.0f 
even though we actually want 1.0f (which is correct).\n        // \n        // Basically, 1.0f gets transformed into 0.0f even though 1.0f is a correct\n        // U coordinate which needs not to be wrapped\n        u -= static_cast<int>(uv.x);\n\n    float v = uv.y;\n    if (v != 1.0f)\n        // Same for v\n        v -= static_cast<int>(uv.y);\n\n    // For negative UVs, we also want to repeat and we want, for example, \n    // -0.1f to behave as 0.9f\n    u = u < 0 ? 1.0f + u : u;\n    v = v < 0 ? 1.0f + v : v;\n\n    // Sampling with [0, 0] bottom-left convention\n    v = 1.0f - v;\n\n    int x = (u * (width - 1));\n    int y = (v * (height - 1));\n\n    ColorRGBA32F out_color;\n    for (int i = 0; i < channels; i++)\n        out_color[i] = m_pixel_data[(x + y * width) * channels + i] / 255.0f;\n\n    return out_color;\n}\n\nvoid Image8Bit::set_data(const std::vector<unsigned char>& data)\n{\n    m_pixel_data = data;\n}\n\nconst std::vector<unsigned char>& Image8Bit::data() const\n{\n    return m_pixel_data;\n}\n\nstd::vector<unsigned char>& Image8Bit::data()\n{\n    return m_pixel_data;\n}\n\nconst unsigned char& Image8Bit::operator[](int index) const\n{\n    return m_pixel_data[index];\n}\n\nunsigned char& Image8Bit::operator[](int index)\n{\n    return m_pixel_data[index];\n}\n\nstd::vector<float> Image8Bit::compute_cdf() const\n{\n    std::vector<float> out_cdf;\n    out_cdf.resize(height * width);\n    out_cdf[0] = 0.0f;\n\n    float max_radiance = 0.0f;\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = y * width + x;\n\n            out_cdf[index] = out_cdf[std::max(index - 1, 0)] + luminance_of_pixel(x, y);\n\n            for (int i = 0; i < hippt::min(3, channels); i++)\n                max_radiance = hippt::max(max_radiance, static_cast<float>(m_pixel_data[(x + y * width) * channels + i]));\n        }\n    }\n\n    return out_cdf;\n}\n\nsize_t Image8Bit::byte_size() const\n{\n    
return width * height * sizeof(unsigned char);\n}\n\nbool Image8Bit::is_constant_color(int threshold) const\n{\n    if (width == 0 || height == 0)\n        // Incorrect image\n        return false;\n\n    std::vector<unsigned char> first_pixel_color(channels);\n    for (int i = 0; i < channels; i++)\n        first_pixel_color[i] = m_pixel_data[i];\n\n    // Comparing the first pixel to all pixels of the texture and returning as soon as we find one\n    // that is not within the threshold\n    for (int y = 0; y < height; y++)\n        for (int x = 0; x < width; x++)\n            for (int i = 0; i < channels; i++)\n                if (std::abs(first_pixel_color[i] - m_pixel_data[(y * width + x) * channels + i]) > threshold)\n                    return false;\n\n    return true;\n}\n\nbool Image8Bit::is_fully_opaque() const\n{\n    if (width == 0 || height == 0)\n        // Incorrect image\n        return false;\n\n    if (channels < 4)\n        // No alpha channel so this is fully opaque\n        return true;\n\n    // Comparing the first pixel to all pixels of the texture and returning as soon as we find one\n    // that is not within the threshold\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            // Checking the alpha channel\n            // Assuming 4 channels if we're here\n            unsigned char alpha_channel = m_pixel_data[(y * width + x) * 4 + 3];\n            if (alpha_channel != 255)\n                return false;\n        }\n    }\n\n    return true;\n}\n\nvoid Image8Bit::free()\n{\n    m_pixel_data.clear();\n    width = 0;\n    height = 0;\n    channels = 0;\n}\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nImage32Bit::Image32Bit(int width, int height, int channels) : Image32Bit(std::vector<float>(width * height * channels, 0), width, height, channels) {}\n\nImage32Bit::Image32Bit(const float* data, int width, int height, int channels) : width(width), height(height), channels(channels)\n{\n    
m_pixel_data = std::vector<float>();\n    m_pixel_data.insert(m_pixel_data.end(), &data[0], &data[width * height * channels]);\n}\n\nImage32Bit::Image32Bit(const std::vector<float>& data, int width, int height, int channels) : width(width), height(height), channels(channels), m_pixel_data(data) {}\n\nImage32Bit::Image32Bit(Image8Bit image, int channels)\n{\n    int input_channels = image.channels;\n    int output_channels = channels == -1 ? image.channels : channels;\n\n\tm_pixel_data.resize(image.width * image.height * output_channels);\n\n    for (int y = 0; y < image.height; y++)\n    {\n        for (int x = 0; x < image.width; x++)\n        {\n            int index = x + y * image.width;\n            for (int i = 0; i < output_channels; i++)\n                m_pixel_data[index * output_channels + i] = static_cast<float>(image[index * input_channels + i]) / 255.0f;\n        }\n\t}\n\n    width = image.width;\n    height = image.height;\n    this->channels = output_channels;\n}\n\nImage32Bit Image32Bit::read_image(const std::string& filepath, int output_channels, bool flipY)\n{\n    stbi_set_flip_vertically_on_load(flipY);\n\n    int width, height, read_channels;\n    unsigned char* pixels = stbi_load(filepath.c_str(), &width, &height, &read_channels, output_channels);\n\n    if (!pixels)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error reading image %s: %s\", filepath.c_str(), stbi_failure_reason());\n        return Image32Bit();\n    }\n\n    Image32Bit output_image(width, height, output_channels);\n#pragma omp parallel for\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = x + y * width;\n\n            for (int i = 0; i < output_channels; i++)\n                output_image[index * output_channels + i] = pixels[index * output_channels + i] / 255.0f;\n        }\n    }\n\n    stbi_image_free(pixels);\n    return output_image;\n}\n\nImage32Bit 
Image32Bit::read_image_hdr(const std::string& filepath, int output_channels, bool flipY)\n{\n    stbi_set_flip_vertically_on_load_thread(flipY);\n\n    int width, height, read_channels;\n    float* pixels = stbi_loadf(filepath.c_str(), &width, &height, &read_channels, output_channels);\n\n    if (!pixels)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error reading image %s: %s\", filepath.c_str(), stbi_failure_reason());\n        return Image32Bit();\n    }\n\n    std::vector<float> converted_data(width * height * output_channels);\n#pragma omp parallel for\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = x + y * width;\n\n            for (int i = 0; i < output_channels; i++)\n                converted_data[index * output_channels + i] = pixels[index * output_channels + i];\n        }\n    }\n\n    stbi_image_free(pixels);\n    return Image32Bit(converted_data, width, height, output_channels);\n}\n\nImage32Bit Image32Bit::read_image_exr(const std::string& filepath, bool flipY)\n{\n    float* out;\n    int width;\n    int height;\n    const char* err = nullptr;\n\n    int ret = LoadEXR(&out, &width, &height, filepath.c_str(), &err);\n\n    if (ret != TINYEXR_SUCCESS) \n    {\n        if (err) \n        {\n            g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error reading EXR image: %s\", err);\n            FreeEXRErrorMessage(err); // release memory of error message.\n        }\n\n        return Image32Bit();\n    }\n    else \n    {\n        if (!flipY)\n        {\n            std::vector<float> vector_data(out, out + width * height * 4);\n            std::free(out); // release memory of image data\n\n            return Image32Bit(vector_data, width, height, 4);\n        }\n        else\n        {\n            std::vector<float> vector_data(width * height * 4);\n\n            for (int y = height - 1; y >= 0; y--)\n            {\n   
             for (int x = 0; x < width; x++)\n                {\n                    int index_y_flipped = x + (height - 1 - y) * width;\n                    int index = x + y * width;\n\n                    index *= 4; // for RGBA\n                    index_y_flipped *= 4; // for RGBA\n\n                    vector_data[index + 0] = out[index_y_flipped + 0];\n                    vector_data[index + 1] = out[index_y_flipped + 1];\n                    vector_data[index + 2] = out[index_y_flipped + 2];\n                    vector_data[index + 3] = out[index_y_flipped + 3];\n                }\n            }\n\n            std::free(out); // release memory of image data\n            return Image32Bit(vector_data, width, height, 4);\n        }\n    }\n}\n\nbool Image32Bit::write_image_png(const char* filename, const bool flipY) const\n{\n    if (byte_size() == 0)\n        return false;\n\n    std::vector<unsigned char> tmp(width * height * channels);\n    for (unsigned i = 0; i < width * height * channels; i++)\n        tmp[i] = hippt::clamp(0.0f, 255.0f, m_pixel_data[i] * 255.0f);\n\n    stbi_flip_vertically_on_write(flipY);\n    return stbi_write_png(filename, width, height, channels, tmp.data(), width * channels) != 0;\n}\n\nbool Image32Bit::write_image_hdr(const char* filename, const bool flipY) const\n{\n    if (byte_size() == 0)\n        return false;\n\n    std::vector<float> tmp(width * height * channels);\n    for (unsigned i = 0; i < width * height; i++)\n        for (int j = 0; j < channels; j++)\n            tmp[i * channels + j] = m_pixel_data[i * channels + j];\n\n    stbi_flip_vertically_on_write(flipY);\n    return stbi_write_hdr(filename, width, height, channels, reinterpret_cast<const float*>(m_pixel_data.data())) != 0;\n}\n\nImage32Bit Image32Bit::to_linear_rgb() const\n{\n    Image32Bit out(width, height, channels);\n\n#pragma omp parallel for\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            
int start_pixel = (x + y * width) * channels;\n            int out_start_pixel = (x + y * width) * channels;\n\n            for (int i = 0; i < channels; i++)\n            {\n                float color_component = m_pixel_data[start_pixel + i];\n\n                // Assuming gamma 2.2\n                out.m_pixel_data[out_start_pixel + i] = powf(color_component, 2.2f);\n            }\n        }\n\t}\n\n    return out;\n}\n\nfloat Image32Bit::luminance_of_pixel(int x, int y) const\n{\n    int start_pixel = (x + y * width) * channels;\n\n    // Computing the luminance with a *maximum* of 3 components.\n    // \n    // If the texture only has one component (i.e. only red), the following\n    // loop will only loop through the red component with the right weight. \n    // \n    // If the image has more than 1 components, 3 for example, then we'll loop through\n    // the 3 components and apply the weights.\n    // \n    // If the image has 4 components, we will still only take RGB into account for the\n    // luminance computation but not alpha\n    float luminance = 0.0f;\n    float weights[3] = { 0.3086f, 0.6094f, 0.0820f };\n    for (int i = 0; i < hippt::min(channels, 3); i++)\n        luminance += m_pixel_data[start_pixel + i] * weights[i];\n\n    return luminance;\n}\n\nfloat Image32Bit::luminance_of_area(int start_x, int start_y, int stop_x, int stop_y) const\n{\n    float luminance = 0.0f;\n\n    for (int x = start_x; x < stop_x; x++)\n        for (int y = start_y; y < stop_y; y++)\n            luminance += luminance_of_pixel(x, y);\n\n    return luminance;\n}\n\nfloat Image32Bit::luminance_of_area(const ImageBin& area) const\n{\n    return luminance_of_area(area.x0, area.y0, area.x1, area.y1);\n}\n\nColorRGBA32F Image32Bit::sample_rgba32f(float2 uv) const\n{\n    // Sampling in repeat mode so we're just keeping the fractional part\n    float u = uv.x;\n    if (u != 1.0f)\n        // Only doing that if u != 1.0f because if we actually have\n        // uv.x == 
1.0f, then subtracting static_cast<int>(uv.x) will\n        // give us 0.0f even though we actually want 1.0f (which is correct).\n        // \n        // Basically, 1.0f gets transformed into 0.0f even though 1.0f is a correct\n        // U coordinate which needs not to be wrapped\n        u -= static_cast<int>(uv.x);\n\n    float v = uv.y;\n    if (v != 1.0f)\n        // Same for v\n        v -= static_cast<int>(uv.y);\n\n    // For negative UVs, we also want to repeat and we want, for example, \n    // -0.1f to behave as 0.9f\n    u = u < 0 ? 1.0f + u : u;\n    v = v < 0 ? 1.0f + v : v;\n\n    // Sampling with [0, 0] bottom-left convention\n    v = 1.0f - v;\n\n    int x = (u * (width - 1));\n    int y = (v * (height - 1));\n\n    ColorRGBA32F out_color;\n    for (int i = 0; i < channels; i++)\n        out_color[i] = m_pixel_data[(x + y * width) * channels + i];\n\n    return out_color;\n}\n\nvoid Image32Bit::set_data(const std::vector<float>& data)\n{\n    m_pixel_data = data;\n}\n\nconst std::vector<float>& Image32Bit::data() const\n{\n    return m_pixel_data;\n}\n\nstd::vector<float>& Image32Bit::data()\n{\n    return m_pixel_data;\n}\n\nconst float& Image32Bit::operator[](int index) const\n{\n    return m_pixel_data[index];\n}\n\nfloat& Image32Bit::operator[](int index)\n{\n    return m_pixel_data[index];\n}\n\nstd::vector<float> Image32Bit::compute_cdf() const\n{\n    std::vector<float> out_cdf;\n    out_cdf.resize(height * width);\n    out_cdf[0] = 0.0f;\n\n    float max_radiance = 0.0f;\n\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            int index = y * width + x;\n\n            out_cdf[index] = out_cdf[std::max(index - 1, 0)] + luminance_of_pixel(x, y);\n\n            for (int i = 0; i < hippt::min(3, channels); i++)\n                max_radiance = hippt::max(max_radiance, m_pixel_data[(x + y * width) * channels + i]);\n        }\n    }\n\n    return out_cdf;\n}\n\n/**\n * Reference: Vose's 
Alias Method [https://www.keithschwarz.com/darts-dice-coins/]\n */\nvoid Image32Bit::compute_alias_table(std::vector<float>& out_probas, std::vector<int>& out_alias, float* out_luminance_total_sum) const\n{\n    float luminance_sum = 0.0f;\n\n    std::vector<float> texel_luminance(width * height);\n    for (int y = 0; y < height; y++)\n    {\n        for (int x = 0; x < width; x++)\n        {\n            float luminance = luminance_of_pixel(x, y);\n\n            luminance_sum += luminance;\n            texel_luminance[x + y * width] = luminance;\n        }\n    }\n\n    if (out_luminance_total_sum)\n        *out_luminance_total_sum = luminance_sum;\n\n    Utils::compute_alias_table(texel_luminance, luminance_sum, out_probas, out_alias);\n}\n\nfloat Image32Bit::compute_luminance_sum() const\n{\n    float sum = 0.0f;\n\n    for (int y = 0; y < height; y++)\n        for (int x = 0; x < width; x++)\n            sum += luminance_of_pixel(x, y);\n\n    return sum;\n}\n\nsize_t Image32Bit::byte_size() const\n{\n    // The pixel data is stored as floats, 'channels' floats per pixel\n    return width * height * channels * sizeof(float);\n}\n\nbool Image32Bit::is_constant_color(float threshold) const\n{\n    if (width == 0 || height == 0)\n        // Incorrect image\n        return false;\n\n    std::vector<float> first_pixel_color(channels);\n    for (int i = 0; i < channels; i++)\n        first_pixel_color[i] = m_pixel_data[i];\n\n    // Comparing the first pixel to all pixels of the texture and returning as soon as we find one\n    // that is not within the threshold\n    for (int y = 0; y < height; y++)\n        for (int x = 0; x < width; x++)\n            for (int i = 0; i < channels; i++)\n                if (std::abs(first_pixel_color[i] - m_pixel_data[(y * width + x) * channels + i]) > threshold)\n                    return false;\n\n    return true;\n}\n\nColorRGB32F* Image32Bit::get_data_as_ColorRGB32F()\n{\n    return\n
reinterpret_cast<ColorRGB32F*>(m_pixel_data.data());\n}\n\nColorRGB32F Image32Bit::get_pixel_ColorRGB32F(int pixel_index) const\n{\n    return ColorRGB32F(m_pixel_data[pixel_index * channels + 0], m_pixel_data[pixel_index * channels + 1], m_pixel_data[pixel_index * channels + 2]);\n}\n\nColorRGBA32F* Image32Bit::get_data_as_ColorRGBA32F()\n{\n    return reinterpret_cast<ColorRGBA32F*>(m_pixel_data.data());\n}\n\nColorRGBA32F Image32Bit::get_pixel_ColorRGBA32F(int pixel_index) const\n{\n    return ColorRGBA32F(m_pixel_data[pixel_index * channels + 0], m_pixel_data[pixel_index * channels + 1], m_pixel_data[pixel_index * channels + 2], m_pixel_data[pixel_index * channels + 3]);\n}\n\nvoid Image32Bit::free()\n{\n    m_pixel_data.clear();\n    width = 0;\n    height = 0;\n    channels = 0;\n}\n\nImage32Bit3D::Image32Bit3D() \n{\n    width = 0;\n    height = 0;\n    depth = 0;\n\n    channels = 0;\n}\n\nImage32Bit3D::Image32Bit3D(const std::vector<Image32Bit> images)\n{\n    m_images = images;\n\n    width = images[0].width;\n    height = images[0].height;\n    depth = images.size();\n\n    channels = images[0].channels;\n}\n\nColorRGBA32F Image32Bit3D::sample_rgba32f(float3 uvw) const\n{\n    // Sampling in repeat mode so we're just keeping the fractional part\n    float u = uvw.x;\n    if (u != 1.0f)\n        // Only doing that if u != 1.0f because if we actually have\n        // uv.x == 1.0f, then subtracting static_cast<int>(uv.x) will\n        // give us 0.0f even though we actually want 1.0f (which is correct).\n        // \n        // Basically, 1.0f gets transformed into 0.0f even though 1.0f is a correct\n        // U coordinate which needs not to be wrapped\n        u -= static_cast<int>(uvw.x);\n\n    float v = uvw.y;\n    if (v != 1.0f)\n        // Same for v\n        v -= static_cast<int>(uvw.y);\n\n    float w = uvw.z;\n    if (w != 1.0f)\n        // Same for w\n        w -= static_cast<int>(uvw.z);\n    \n\n    // For negative UVs, we also want to repeat 
and we want, for example, \n    // -0.1f to behave as 0.9f\n    u = u < 0 ? 1.0f + u : u;\n    v = v < 0 ? 1.0f + v : v;\n    w = w < 0 ? 1.0f + w : w;\n\n    // Sampling with [0, 0] bottom-left convention\n    v = 1.0f - v;\n\n    int x = (u * (width - 1));\n    int y = (v * (height - 1));\n    int z = (w * (depth - 1));\n\n    ColorRGBA32F out_color;\n    for (int i = 0; i < channels; i++)\n        out_color[i] = m_images[z][(x + y * width) * channels + i];\n\n    return out_color;\n}\n"
  },
  {
    "path": "src/Image/Image.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMAGE_H\n#define IMAGE_H\n\n#include \"HostDeviceCommon/Color.h\"\n\n#include \"stb_image.h\"\n#include \"stb_image_write.h\"\n\n#include <string>\n#include <type_traits>\n\nstruct ImageBin\n{\n    int x0, x1;\n    int y0, y1;\n};\n\nclass Image8Bit\n{\npublic:\n    Image8Bit() : width(0), height(0), channels(0) {}\n    Image8Bit(int width, int height, int channels);\n    Image8Bit(const unsigned char* data, int width, int height, int channels);\n    Image8Bit(const std::vector<unsigned char>& data, int width, int height, int channels);\n\n    static Image8Bit read_image(const std::string& filepath, int output_channels, bool flipY);\n    static Image8Bit read_image_hdr(const std::string& filepath, int output_channels, bool flipY);\n\n    bool write_image_png(const char* filename, const bool flipY = true) const;\n    bool write_image_hdr(const char* filename, const bool flipY = true) const;\n\n    float luminance_of_pixel(int x, int y) const;\n    float luminance_of_area(int start_x, int start_y, int stop_x, int stop_y) const;\n    float luminance_of_area(const ImageBin& area) const;\n\n    ColorRGBA32F sample_rgba32f(float2 uv) const;\n\n    void set_data(const std::vector<unsigned char>& data);\n    const std::vector<unsigned char>& data() const;\n    std::vector<unsigned char>& data();\n\n    const unsigned char& operator[](int index) const;\n    unsigned char& operator[](int index);\n\n    std::vector<float> compute_cdf() const;\n\n    size_t byte_size() const;\n\n    /** \n     * Returns true if all the pixels of the texture are the same color\n     * False otherwise\n     *\n     * A threshold can be given to assume that a color is equal to another\n     * if the R, G and B channels of the two colors are each within 'threshold'\n     * distance\n     */ \n     bool is_constant_color(int threshold = 0) 
const;\n\n     /**\n      * Returns true if all pixels of the image have 1.0f alpha channel.\n      * Returns true if the texture has less than 4 channels\n      * \n      * Returns false otherwise\n      */\n     bool is_fully_opaque() const;\n\n    /**\n     * Frees the data of this image and sets its width, height and channels back to 0\n     */\n    void free();\n\n    int width, height, channels;\n\nprotected:\n    std::vector<unsigned char> m_pixel_data;\n};\n\nclass Image32Bit\n{\npublic:\n    Image32Bit() {}\n    Image32Bit(int width, int height, int channels);\n    Image32Bit(const float* data, int width, int height, int channels);\n    Image32Bit(const std::vector<float>& data, int width, int height, int channels);\n    Image32Bit(Image8Bit image, int channels = -1);\n\n    static Image32Bit read_image(const std::string& filepath, int output_channels, bool flipY);\n    static Image32Bit read_image_hdr(const std::string& filepath, int output_channels, bool flipY);\n    static Image32Bit read_image_exr(const std::string& filepath, bool flipY);\n\n    bool write_image_png(const char* filename, const bool flipY = true) const;\n    bool write_image_hdr(const char* filename, const bool flipY = true) const;\n\n    Image32Bit to_linear_rgb() const;\n\n    float luminance_of_pixel(int x, int y) const;\n    float luminance_of_area(int start_x, int start_y, int stop_x, int stop_y) const;\n    float luminance_of_area(const ImageBin& area) const;\n\n    ColorRGB32F* get_data_as_ColorRGB32F();\n    ColorRGB32F get_pixel_ColorRGB32F(int pixel_index) const;\n    ColorRGBA32F* get_data_as_ColorRGBA32F();\n    ColorRGBA32F get_pixel_ColorRGBA32F(int pixel_index) const;\n\n    ColorRGBA32F sample_rgba32f(float2 uv) const;\n\n    void set_data(const std::vector<float>& data);\n    const std::vector<float>& data() const;\n    std::vector<float>& data();\n\n    const float& operator[](int index) const;\n    float& operator[](int index);\n\n    std::vector<float> compute_cdf() 
const;\n    void compute_alias_table(std::vector<float>& out_probas, std::vector<int>& out_alias, float* out_luminance_total_sum = nullptr) const;\n\n    float compute_luminance_sum() const;\n\n    size_t byte_size() const;\n\n    /** \n     * Returns true if all the pixels of the texture are the same color\n     * False otherwise\n     *\n     * A threshold can be given to assume that a color is equal to another\n     * if the R, G and B channels of the two colors are each within 'threshold'\n     * distance\n     */\n    bool is_constant_color(float threshold) const;\n\n    /**\n     * Frees the data of this image and sets its width, height and channels back to 0\n     */\n    void free();\n\n    int width = 0, height = 0, channels = 0;\n\nprotected:\n    std::vector<float> m_pixel_data;\n};\n\nclass Image32Bit3D\n{\npublic:\n    Image32Bit3D();\n    Image32Bit3D(const std::vector<Image32Bit> images);\n\n    ColorRGBA32F sample_rgba32f(float3 uvw) const;\n\n    int width, height, depth, channels;\n\nprivate:\n    std::vector<Image32Bit> m_images;\n};\n\n#endif\n"
  },
  {
    "path": "src/OpenGL/OpenGLInteropBuffer.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OPENGL_INTEROP_BUFFER_H\n#define OPENGL_INTEROP_BUFFER_H\n\n#include \"HIPRT-Orochi/HIPRTOrochiUtils.h\"\n#include \"UI/DisplayView/DisplayTextureType.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\n#include \"GL/glew.h\"\n#include \"GLFW/glfw3.h\"\n#include \"Orochi/Orochi.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\ntemplate <typename T>\nclass OpenGLInteropBuffer\n{\npublic:\n\tOpenGLInteropBuffer() {}\n\tOpenGLInteropBuffer(int element_count);\n\t~OpenGLInteropBuffer();\n\n\tGLuint get_opengl_buffer();\n\n\tvoid resize(int new_element_count);\n\tsize_t size() const;\n\tsize_t get_byte_size() const;\n\n\t/**\n\t * Makes the buffer accesible to HIP/CUDA and returns the HIP/CUDA pointer\n\t * to that buffer\n\t */\n\tT* map();\n\n\t/**\n\t * Makes the buffer accessible by OpenGL\n\t */\n\tvoid unmap();\n\n\t/**\n\t * Copies the buffer data to an OpenGL texture\n\t */\n\tvoid unpack_to_GL_texture(GLuint texture, GLint texture_unit, int width, int height, DisplayTextureType texture_type);\n\n\tvoid free();\n\nprivate:\n\tbool m_initialized = false;\n\tbool m_mapped = false;\n\tT* m_mapped_pointer = nullptr;\n\n\tsize_t m_element_count = 0;\n\n\tGLuint m_buffer_name = -1;\n\n\toroGraphicsResource_t m_buffer_resource = nullptr;\n};\n\ntemplate <typename T>\nOpenGLInteropBuffer<T>::OpenGLInteropBuffer(int element_count)\n{\n\tglCreateBuffers(1, &m_buffer_name);\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_buffer_name);\n\tglBufferData(GL_PIXEL_UNPACK_BUFFER, element_count * sizeof(T), nullptr, GL_DYNAMIC_DRAW);\n\n\tOROCHI_CHECK_ERROR(oroGraphicsGLRegisterBuffer(&m_buffer_resource, m_buffer_name, oroGraphicsRegisterFlagsNone));\n\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);\n\n\tm_initialized = true;\n\tm_mapped = false;\n\tm_element_count = element_count;\n}\n\ntemplate <typename 
T>\nOpenGLInteropBuffer<T>::~OpenGLInteropBuffer()\n{\n\tif (m_initialized)\n\t\tfree();\n}\n\ntemplate <typename T>\nGLuint OpenGLInteropBuffer<T>::get_opengl_buffer()\n{\n\treturn m_buffer_name;\n}\n\ntemplate <typename T>\nvoid OpenGLInteropBuffer<T>::resize(int new_element_count)\n{\n\tif (m_mapped)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to resize interop buffer while it is mapped! This is undefined behavior\");\n\n\t\treturn;\n\t}\n\n\tif (m_initialized)\n\t{\n\t\toroGraphicsUnregisterResource(m_buffer_resource);\n\n\t\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_buffer_name);\n\t\tglBufferData(GL_PIXEL_UNPACK_BUFFER, new_element_count * sizeof(T), nullptr, GL_DYNAMIC_DRAW);\n\t}\n\telse\n\t{\n\t\tglCreateBuffers(1, &m_buffer_name);\n\t\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, m_buffer_name);\n\t\tglBufferData(GL_PIXEL_UNPACK_BUFFER, new_element_count * sizeof(T), nullptr, GL_DYNAMIC_DRAW);\n\t}\n\n#ifndef OROCHI_ENABLE_CUEW\n\t// TODO hipGLGetDevices here is required for hipGraphicsGLRegisterBuffer to work. 
This is very scuffed.\n\tunsigned int count = 0;\n\tstd::vector<int> devices(16);\n\thipGLGetDevices(&count, devices.data(), 16, hipGLDeviceListAll);\n#endif\n\n\toroGraphicsGLRegisterBuffer(&m_buffer_resource, m_buffer_name, oroGraphicsRegisterFlagsNone);\n\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);\n\n\tm_initialized = true;\n\tm_element_count = new_element_count;\n}\n\ntemplate <typename T>\nsize_t OpenGLInteropBuffer<T>::size() const\n{\n\treturn m_element_count;\n}\n\ntemplate <typename T>\nsize_t OpenGLInteropBuffer<T>::get_byte_size() const\n{\n\treturn m_element_count * sizeof(T);\n}\n\ntemplate <typename T>\nT* OpenGLInteropBuffer<T>::map()\n{\n\tif (!m_initialized)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Mapping a buffer that hasn't been initialized!\");\n\n\t\tUtils::debugbreak();\n\n\t\treturn nullptr;\n\t}\n\n\tif (m_mapped)\n\t\t// Already mapped\n\t\treturn m_mapped_pointer;\n\n\tsize_t byte_size;\n\tOROCHI_CHECK_ERROR(oroGraphicsMapResources(1, reinterpret_cast<oroGraphicsResource_t*>(&m_buffer_resource), 0));\n\tOROCHI_CHECK_ERROR(oroGraphicsResourceGetMappedPointer((void**)(&m_mapped_pointer), &byte_size, reinterpret_cast<oroGraphicsResource_t>(m_buffer_resource)));\n\n\tm_mapped = true;\n\treturn m_mapped_pointer;\n}\n\ntemplate <typename T>\nvoid OpenGLInteropBuffer<T>::unmap()\n{\n\tif (!m_mapped)\n\t\t// Already unmapped\n\t\treturn;\n\n\tOROCHI_CHECK_ERROR(oroGraphicsUnmapResources(1, reinterpret_cast<oroGraphicsResource_t*>(&m_buffer_resource), 0));\n\n\tm_mapped = false;\n\tm_mapped_pointer = nullptr;\n}\n\ntemplate<typename T>\nvoid OpenGLInteropBuffer<T>::unpack_to_GL_texture(GLuint texture, GLint texture_unit, int width, int height, DisplayTextureType texture_type)\n{\n\tGLenum format = texture_type.get_gl_format();\n\tGLenum type = texture_type.get_gl_type();\n\n\tglActiveTexture(texture_unit);\n\tglBindTexture(GL_TEXTURE_2D, texture);\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, 
get_opengl_buffer());\n\tglTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, format, type, 0);\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);\n}\n\ntemplate<typename T>\nvoid OpenGLInteropBuffer<T>::free()\n{\n\tif (m_initialized)\n\t{\n\t\t// Teardown order matters: the interop resource must be unmapped before\n\t\t// it is unregistered, and the GL buffer must only be deleted once it is\n\t\t// no longer registered with HIP/CUDA\n\t\tif (m_mapped)\n\t\t\tunmap();\n\n\t\tOROCHI_CHECK_ERROR(oroGraphicsUnregisterResource(reinterpret_cast<oroGraphicsResource_t>(m_buffer_resource)));\n\n\t\tglDeleteBuffers(1, &m_buffer_name);\n\t}\n\telse\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Freeing an OpenGLInterop buffer that hasn't been initialized (or has been freed already)!\");\n\n\t\treturn;\n\t}\n\t\n\tm_element_count = 0;\n\tm_initialized = false;\n}\n\n#endif\n"
  },
  {
    "path": "src/OpenGL/OpenGLProgram.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"OpenGL/OpenGLProgram.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nOpenGLProgram::OpenGLProgram(const OpenGLShader& compiled_vertex)\n{\n\tm_program = glCreateProgram();\n\tglAttachShader(m_program, compiled_vertex.get_shader());\n\tglLinkProgram(m_program);\n}\n\nOpenGLProgram::OpenGLProgram(const OpenGLShader& compiled_vertex, const OpenGLShader& compiled_fragment)\n{\n\tm_program = glCreateProgram();\n\tglAttachShader(m_program, compiled_vertex.get_shader());\n\tglAttachShader(m_program, compiled_fragment.get_shader());\n\tglLinkProgram(m_program);\n}\n\nOpenGLProgram::~OpenGLProgram()\n{\n\tglDeleteProgram(m_program);\n}\n\nvoid OpenGLProgram::attach(const OpenGLShader& compiled_shader)\n{\n\tif (m_program == (unsigned int)(-1))\n\t\tm_program = glCreateProgram();\n\n\tglAttachShader(m_program, compiled_shader.get_shader());\n\tif (compiled_shader.get_shader_type() == OpenGLShader::COMPUTE_SHADER)\n\t\tm_is_compute = true;\n}\n\nvoid OpenGLProgram::link()\n{\n\tglLinkProgram(m_program);\n\n\tif (m_is_compute)\n\t\tglGetProgramiv(m_program, GL_COMPUTE_WORK_GROUP_SIZE, m_compute_threads);\n}\n\nvoid OpenGLProgram::use()\n{\n\tglUseProgram(m_program);\n}\n\nvoid OpenGLProgram::get_compute_threads(GLint threads[3])\n{\n\tif (!m_is_compute)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"This program isn't a compute shader\");\n\n\t\treturn;\n\t}\n\n\tthreads[0] = m_compute_threads[0];\n\tthreads[1] = m_compute_threads[1];\n\tthreads[2] = m_compute_threads[2];\n}\n\nvoid OpenGLProgram::set_uniform(const char* name, int value)\n{\n\tglUniform1i(glGetUniformLocation(m_program, name), value);\n}\n\nvoid OpenGLProgram::set_uniform(const char* name, float value)\n{\n\tglUniform1f(glGetUniformLocation(m_program, name), value);\n}\n\nvoid 
OpenGLProgram::set_uniform(const char* name, const float2& value)\n{\n\tglUniform2f(glGetUniformLocation(m_program, name), value.x, value.y);\n}\n\nvoid OpenGLProgram::set_uniform(const char* name, const float3& value)\n{\n\tglUniform3f(glGetUniformLocation(m_program, name), value.x, value.y, value.z);\n}\n\nvoid OpenGLProgram::set_uniform(const char* name, int count, const float* values)\n{\n\tglUniform3fv(glGetUniformLocation(m_program, name), count, values);\n}\n\nvoid OpenGLProgram::set_uniform(const char* name, const float4& value)\n{\n\tglUniform4f(glGetUniformLocation(m_program, name), value.x, value.y, value.z, value.w);\n}\n\n"
  },
  {
    "path": "src/OpenGL/OpenGLProgram.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OPENGL_PROGRAM_H\n#define OPENGL_PROGRAM_H\n\n#include \"GL/glew.h\"\n#include \"HostDeviceCommon/Math.h\"\n#include \"OpenGL/OpenGLShader.h\"\n\nclass OpenGLProgram\n{\npublic:\n\tOpenGLProgram() : m_program(-1) { }\n\tOpenGLProgram(OpenGLProgram& other) = delete;\n\tOpenGLProgram(const OpenGLShader& vertex);\n\tOpenGLProgram(const OpenGLShader& compiled_vertex, const OpenGLShader& compiled_fragment);\n\t~OpenGLProgram();\n\n\tvoid attach(const OpenGLShader& compiled_shader);\n\tvoid link();\n\tvoid use();\n\n\tvoid get_compute_threads(GLint threads[3]);\n\n\tvoid set_uniform(const char* name, int value);\n\tvoid set_uniform(const char* name, float value);\n\tvoid set_uniform(const char* name, const float2& value);\n\tvoid set_uniform(const char* name, const float3& value);\n\tvoid set_uniform(const char* name, int count, const float* values);\n\tvoid set_uniform(const char* name, const float4& value);\n\nprivate:\n\n\tbool m_is_compute = false;\n\tGLuint m_program = -1;\n\tGLint m_compute_threads[3] = { 0, 0, 0 };\n};\n\n#endif"
  },
  {
    "path": "src/OpenGL/OpenGLShader.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"OpenGL/OpenGLShader.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nOpenGLShader::OpenGLShader(const std::string& source_code, ShaderType type, const std::vector<std::string>& macros)\n{\n\tm_shader_type = type;\n\n\tset_source(source_code);\n\tcompile(macros);\n}\n\nOpenGLShader::OpenGLShader(const char* filepath, ShaderType type, const std::vector<std::string>& macros)\n{\n\tm_shader_type = type;\n\n\tset_source_from_file(filepath);\n\tcompile(macros);\n}\n\nstd::string& OpenGLShader::get_source()\n{\n\treturn m_source_code;\n}\n\nconst std::string& OpenGLShader::get_source() const\n{\n\treturn m_source_code;\n}\n\nbool OpenGLShader::has_filepath() const\n{\n\treturn m_filepath.length() > 0;\n}\n\nstd::string& OpenGLShader::get_path()\n{\n\treturn m_filepath;\n}\n\nconst std::string& OpenGLShader::get_path() const\n{\n\treturn m_filepath;\n}\n\nvoid OpenGLShader::set_source(const std::string& source_code)\n{\n\tm_source_code = source_code;\n}\n\nvoid OpenGLShader::set_source_from_file(const char* filepath)\n{\n\tm_source_code = Utils::file_to_string(filepath);\n}\n\nGLuint OpenGLShader::get_shader() const\n{\n\treturn m_compiled_shader;\n}\n\nOpenGLShader::ShaderType OpenGLShader::get_shader_type() const\n{\n\treturn m_shader_type;\n}\n\nvoid OpenGLShader::compile(const std::vector<std::string>& macros /* = std::vector() */)\n{\n\tstd::string source_code = add_macros_to_source(macros);\n\n\tconst char* shader_text = source_code.c_str();\n\n\tm_compiled_shader = glCreateShader(m_shader_type);\n\tglShaderSource(m_compiled_shader, 1, &shader_text, NULL);\n\tglCompileShader(m_compiled_shader);\n\tif (!print_shader_compile_error(m_compiled_shader))\n\t{\n\t\tif (has_filepath())\n\t\t\tthrow new std::runtime_error(\"Unable to compile shader given at this 
path: \" + get_path());\n\t\telse\n\t\t\tthrow std::runtime_error(\"Unable to compile shader\");\n\t}\n}\n\nbool OpenGLShader::print_shader_compile_error(GLuint shader)\n{\n\tGLint isCompiled = 0;\n\tglGetShaderiv(shader, GL_COMPILE_STATUS, &isCompiled);\n\tif (isCompiled == GL_FALSE)\n\t{\n\t\tGLint maxLength = 0;\n\t\tglGetShaderiv(shader, GL_INFO_LOG_LENGTH, &maxLength);\n\n\t\t// The maxLength includes the NULL character\n\t\tstd::vector<GLchar> errorLog(maxLength);\n\t\tglGetShaderInfoLog(shader, maxLength, &maxLength, &errorLog[0]);\n\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"%s\", errorLog.data());\n\n\t\t// Provide the infolog in whatever manner you deem best.\n\t\t// Exit with failure.\n\t\tglDeleteShader(shader); // Don't leak the shader.\n\n\t\treturn false;\n\t}\n\n\treturn true;\n}\n\nstd::string OpenGLShader::add_macros_to_source(const std::vector<std::string>& macros)\n{\n\tsize_t version_pos = m_source_code.find(\"#version\");\n\tif (version_pos != std::string::npos)\n\t{\n\t\tsize_t line_return_pos = m_source_code.find('\\n', version_pos);\n\t\tsize_t after_return = line_return_pos + 1;\n\n\t\tstd::string modified_source = m_source_code;\n\t\tfor (const std::string& macro : macros)\n\t\t\tmodified_source = modified_source.insert(after_return, macro + \"\\n\");\n\n\t\treturn modified_source;\n\t}\n\telse\n\t\tthrow std::runtime_error(\"No #version directive found in shader...\");\n}"
  },
  {
    "path": "src/OpenGL/OpenGLShader.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OPENGL_SHADER_H\n#define OPENGL_SHADER_H\n\n#include \"GL/glew.h\"\n\n#include <string>\n#include <vector>\n\nclass OpenGLShader\n{\npublic:\n\tenum ShaderType\n\t{\n\t\tUNDEFINED,\n\t\tVERTEX_SHADER = GL_VERTEX_SHADER,\n\t\tFRAGMENT_SHADER = GL_FRAGMENT_SHADER,\n\t\tCOMPUTE_SHADER = GL_COMPUTE_SHADER\n\t};\n\n\tOpenGLShader() : m_compiled_shader(-1), m_shader_type(ShaderType::UNDEFINED) {}\n\t/**\n\t * If given, macros should be entire strings (with the '#') like \"#define MY_MACRO\", #if X, #endif, ... \n\t */\n\tOpenGLShader(const std::string& source_code, ShaderType type, const std::vector<std::string>& macros = std::vector<std::string>());\n\tOpenGLShader(const char* filepath, ShaderType type, const std::vector<std::string>& macros = std::vector<std::string>());\n\n\tstd::string& get_source();\n\tconst std::string& get_source() const;\n\n\tbool has_filepath() const;\n\tstd::string& get_path();\n\tconst std::string& get_path() const;\n\n\tvoid set_source(const std::string& source_code);\n\tvoid set_source_from_file(const char* filepath);\n\n\tGLuint get_shader() const;\n\tShaderType get_shader_type() const;\n\n\tvoid compile(const std::vector<std::string>& macros);\n\tstd::string add_macros_to_source(const std::vector<std::string>& macros);\n\n\tstatic bool print_shader_compile_error(GLuint shader);\n\nprivate:\n\tstd::string m_filepath;\n\tstd::string m_source_code;\n\n\tShaderType m_shader_type;\n\tGLuint m_compiled_shader;\n};\n\n#endif"
  },
  {
    "path": "src/Renderer/BVH.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include <algorithm>\n#include <cmath>\n#include <vector>\n\n#include \"Renderer/BVH.h\"\n\nconst float3 BoundingVolume::PLANE_NORMALS[BVHConstants::PLANES_COUNT] = {\n\tmake_float3(1, 0, 0),\n\tmake_float3(0, 1, 0),\n    make_float3(0, 0, 1),\n    make_float3(std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3),\n    make_float3(-std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3),\n    make_float3(-std::sqrt(3.0f) / 3, -std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3),\n    make_float3(std::sqrt(3.0f) / 3, -std::sqrt(3.0f) / 3, std::sqrt(3.0f) / 3),\n};\n\nBVH::BVH() : m_root(nullptr), m_triangles(nullptr) {}\nBVH::BVH(std::vector<Triangle>* triangles, int max_depth, int leaf_max_obj_count) : m_triangles(triangles)\n{\n\tBoundingVolume volume;\n\tfloat3 minimum = make_float3(INFINITY, INFINITY, INFINITY);\n\tfloat3 maximum = make_float3(-INFINITY, -INFINITY, -INFINITY);\n\n\tfor (const Triangle& triangle : *triangles)\n\t{\n\t\tvolume.extend_volume(triangle);\n\n\t\tfor (int i = 0; i < 3; i++)\n\t\t{\n\t\t\tminimum = hippt::min(minimum, triangle[i]);\n\t\t\tmaximum = hippt::max(maximum, triangle[i]);\n\t\t}\n\t}\n\n\t//We now have a bounding volume to work with\n\tbuild_bvh(max_depth, leaf_max_obj_count, minimum, maximum, volume);\n}\n\nBVH::~BVH()\n{\n\tdelete m_root;\n}\n\nvoid BVH::operator=(BVH&& bvh)\n{\n\tm_triangles = bvh.m_triangles;\n\tm_root = bvh.m_root;\n\n\tbvh.m_root = nullptr;\n}\n\nvoid BVH::build_bvh(int max_depth, int leaf_max_obj_count, float3 min, float3 max, const BoundingVolume& volume)\n{\n\tm_root = new OctreeNode(min, max);\n\n    for (int triangle_id = 0; triangle_id < m_triangles->size(); triangle_id++)\n        m_root->insert(*m_triangles, triangle_id, 0, max_depth, leaf_max_obj_count);\n\n    m_root->compute_volume(*m_triangles);\n}\n\nbool BVH::intersect(const hiprtRay& ray, 
hiprtHit& hit_info, void* filter_function_payload) const\n{\n    return m_root->intersect(*m_triangles, ray, hit_info, filter_function_payload);\n}\n"
  },
  {
    "path": "src/Renderer/BVH.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef BVH_H\n#define BVH_H\n\n#include \"Device/functions/FilterFunction.h\"\n\n#include \"Renderer/BoundingVolume.h\"\n#include \"Renderer/BVHConstants.h\"\n#include \"Renderer/Triangle.h\"\n\n#include <array>\n#include <atomic>\n#include <cmath>\n#include <deque>\n#include <limits>\n#include <queue>\n\n#include <hiprt/hiprt_types.h> // for hiprtRay\n\nclass BVH\n{\npublic:\n    struct OctreeNode\n    {\n        struct QueueElement\n        {\n            QueueElement(const BVH::OctreeNode* node, float t_near) : m_node(node), _t_near(t_near) {}\n\n            bool operator > (const QueueElement& a) const\n            {\n                return _t_near > a._t_near;\n            }\n\n            const OctreeNode* m_node;//Reference on the node\n\n            float _t_near;//Intersection distance used to order the elements in the priority queue used\n            //by the OctreeNode to compute the intersection with a ray\n        };\n\n        OctreeNode(float3 min, float3 max) : m_min(min), m_max(max) {}\n        ~OctreeNode()\n        {\n            if (m_is_leaf)\n                return;\n            else\n            {\n                for (int i = 0; i < 8; i++)\n                    delete m_children[i];\n            }\n        }\n\n        /*\n          * Once the objects have been inserted in the hierarchy, this function computes\n          * the bounding volume of all the node in the hierarchy\n          */\n        BoundingVolume compute_volume(const std::vector<Triangle>& triangles_geometry)\n        {\n            if (m_is_leaf)\n                for (int triangle_id : m_triangles)\n                    m_bounding_volume.extend_volume(triangles_geometry[triangle_id]);\n            else\n                for (int i = 0; i < 8; i++)\n                    
m_bounding_volume.extend_volume(m_children[i]->compute_volume(triangles_geometry));\n\n            return m_bounding_volume;\n        }\n\n        void create_children(int max_depth, int leaf_max_obj_count)\n        {\n            float middle_x = (m_min.x + m_max.x) / 2;\n            float middle_y = (m_min.y + m_max.y) / 2;\n            float middle_z = (m_min.z + m_max.z) / 2;\n\n            m_children[0] = new OctreeNode(m_min, make_float3(middle_x, middle_y, middle_z));\n            m_children[1] = new OctreeNode(make_float3(middle_x, m_min.y, m_min.z), make_float3(m_max.x, middle_y, middle_z));\n            m_children[2] = new OctreeNode(m_min + make_float3(0, middle_y, 0), make_float3(middle_x, m_max.y, middle_z));\n            m_children[3] = new OctreeNode(make_float3(middle_x, middle_y, m_min.z), make_float3(m_max.x, m_max.y, middle_z));\n            m_children[4] = new OctreeNode(m_min + make_float3(0, 0, middle_z), make_float3(middle_x, middle_y, m_max.z));\n            m_children[5] = new OctreeNode(make_float3(middle_x, m_min.y, middle_z), make_float3(m_max.x, middle_y, m_max.z));\n            m_children[6] = new OctreeNode(m_min + make_float3(0, middle_y, middle_z), make_float3(middle_x, m_max.y, m_max.z));\n            m_children[7] = new OctreeNode(make_float3(middle_x, middle_y, middle_z), make_float3(m_max.x, m_max.y, m_max.z));\n        }\n\n        void insert(const std::vector<Triangle>& triangles_geometry, int triangle_id_to_insert, int current_depth, int max_depth, int leaf_max_obj_count)\n        {\n            bool depth_exceeded = max_depth != -1 && current_depth == max_depth;\n\n            if (m_is_leaf || depth_exceeded)\n            {\n                m_triangles.push_back(triangle_id_to_insert);\n\n                if (m_triangles.size() > leaf_max_obj_count && !depth_exceeded)\n                {\n                    m_is_leaf = false;//This node isn't a leaf anymore\n\n                    create_children(max_depth, 
leaf_max_obj_count);\n\n                    for (int triangle_id : m_triangles)\n                        insert_to_children(triangles_geometry, triangle_id, current_depth, max_depth, leaf_max_obj_count);\n\n                    m_triangles.clear();\n                    m_triangles.shrink_to_fit();\n                }\n            }\n            else\n                insert_to_children(triangles_geometry, triangle_id_to_insert, current_depth, max_depth, leaf_max_obj_count);\n\n        }\n\n        void insert_to_children(const std::vector<Triangle>& triangles_geometry, int triangle_id_to_insert, int current_depth, int max_depth, int leaf_max_obj_count)\n        {\n            const Triangle& triangle = triangles_geometry[triangle_id_to_insert];\n            float3 bbox_centroid = triangle.bbox_centroid();\n\n            float middle_x = (m_min.x + m_max.x) / 2;\n            float middle_y = (m_min.y + m_max.y) / 2;\n            float middle_z = (m_min.z + m_max.z) / 2;\n\n            int octant_index = 0;\n\n            if (bbox_centroid.x > middle_x) octant_index += 1;\n            if (bbox_centroid.y > middle_y) octant_index += 2;\n            if (bbox_centroid.z > middle_z) octant_index += 4;\n\n            m_children[octant_index]->insert(triangles_geometry, triangle_id_to_insert, current_depth + 1, max_depth, leaf_max_obj_count);\n        }\n\n        bool intersect(const std::vector<Triangle>& triangles_geometry, const hiprtRay& ray, hiprtHit& hit_info, void* filter_function_payload) const\n        {\n            float trash;\n\n            float denoms[BVHConstants::PLANES_COUNT];\n            float numers[BVHConstants::PLANES_COUNT];\n\n            for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n            {\n                denoms[i] = hippt::dot(BoundingVolume::PLANE_NORMALS[i], ray.direction);\n                numers[i] = hippt::dot(BoundingVolume::PLANE_NORMALS[i], float3(ray.origin));\n            }\n\n            return 
intersect(triangles_geometry, ray, hit_info, trash, denoms, numers, filter_function_payload);\n        }\n\n        bool intersect(const std::vector<Triangle>& triangles_geometry, const hiprtRay& ray, hiprtHit& hit_info, float& t_near, float* denoms, float* numers, void* filter_function_payload) const\n        {\n            float t_far, trash;\n\n            if (!m_bounding_volume.intersect(trash, t_far, denoms, numers))\n                return false;\n\n            if (m_is_leaf)\n            {\n                for (int triangle_id : m_triangles)\n                {\n                    const Triangle& triangle = triangles_geometry[triangle_id];\n\n                    hiprtHit localHit;\n                    if (triangle.intersect(ray, localHit))\n                    {\n                        localHit.primID = triangle_id;\n\n                        if (localHit.t < ray.minT)\n                            // minT test not passed\n                            continue;\n\n                        if (filter_function(ray, nullptr, filter_function_payload, localHit))\n                            // Hit is filtered\n                            continue;\n\n                        if (localHit.t < hit_info.t || hit_info.t == -1)\n                            hit_info = localHit;\n                    }\n                }\n\n                t_near = hit_info.t;\n\n                return t_near > 0;\n            }\n\n            std::priority_queue<QueueElement, std::vector<QueueElement>, std::greater<QueueElement>> intersection_queue;\n            for (int i = 0; i < 8; i++)\n            {\n                float inter_distance;\n                if (m_children[i]->m_bounding_volume.intersect(inter_distance, t_far, denoms, numers))\n                    intersection_queue.emplace(QueueElement(m_children[i], inter_distance));\n            }\n\n            bool intersection_found = false;\n            float closest_inter = 100000000, inter_distance = 100000000;\n            while 
(!intersection_queue.empty())\n            {\n                QueueElement top_element = intersection_queue.top();\n                intersection_queue.pop();\n\n                if (top_element.m_node->intersect(triangles_geometry, ray, hit_info, inter_distance, denoms, numers, filter_function_payload))\n                {\n                    closest_inter = std::min(closest_inter, inter_distance);\n                    intersection_found = true;\n\n                    //If we found an intersection that is closer than\n                    //the next element in the queue, we can stop intersecting further\n                    if (intersection_queue.empty() || closest_inter < intersection_queue.top()._t_near)\n                    {\n                        t_near = closest_inter;\n\n                        return true;\n                    }\n                }\n            }\n\n            if (!intersection_found)\n                return false;\n            else\n            {\n                t_near = closest_inter;\n\n                return true;\n            }\n        }\n\n        //If this node has been subdivided (and thus cannot accept any triangles),\n        //this boolean will be set to false\n        bool m_is_leaf = true;\n\n        std::vector<int> m_triangles;\n        std::array<BVH::OctreeNode*, 8> m_children = \n        {\n            nullptr,\n            nullptr,\n            nullptr,\n            nullptr,\n            nullptr,\n            nullptr,\n            nullptr,\n            nullptr\n        };\n\n        float3 m_min, m_max;\n        BoundingVolume m_bounding_volume;\n    };\n\npublic:\n    BVH();\n    BVH(std::vector<Triangle>* triangles, int max_depth = 32, int leaf_max_obj_count = 8);\n    ~BVH();\n\n    void operator=(BVH&& bvh);\n     \n    bool intersect(const hiprtRay& ray, hiprtHit& hit_info, void* filter_function_payload) const;\n\nprivate:\n    void build_bvh(int max_depth, int leaf_max_obj_count, float3 min, float3 max, const 
BoundingVolume& volume);\n\npublic:\n    OctreeNode* m_root;\n\n    std::vector<Triangle>* m_triangles;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/BVHConstants.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef BVH_CONSTANTS_H\n#define BVH_CONSTANTS_H\n\nstruct BVHConstants\n{\n    static constexpr int FLATTENED_BVH_MAX_STACK_SIZE = 100000;\n\n    static constexpr int PLANES_COUNT = 7;\n    static constexpr int MAX_TRIANGLES_PER_LEAF = 8;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GGXConductorDirectionalAlbedoSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_SETTINGS_H\n#define GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_SETTINGS_H\n\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\nstruct GGXConductorDirectionalAlbedoSettings\n{\n\tint texture_size_cos_theta = GPUBakerConstants::GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O;\n\tint texture_size_roughness = GPUBakerConstants::GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS;\n\tGGXMaskingShadowingFlavor masking_shadowing_term;\n\n\tint integration_sample_count = 262144;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GGXFresnelDirectionalAlbedoSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GGX_FRESNEL_DIRECTIONAL_ALBEDO_SETTINGS_H\n#define GGX_FRESNEL_DIRECTIONAL_ALBEDO_SETTINGS_H\n\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\nstruct GGXFresnelDirectionalAlbedoSettings\n{\n\tint texture_size_cos_theta = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O;\n\tint texture_size_roughness = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS;\n\tint texture_size_ior = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR;\n\tGGXMaskingShadowingFlavor masking_shadowing_term;\n\n\tint integration_sample_count = 65536;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GGXGlassDirectionalAlbedoSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GGX_GLASS_DIRECTIONAL_ALBEDO_SETTINGS_H\n#define GGX_GLASS_DIRECTIONAL_ALBEDO_SETTINGS_H\n\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\nstruct GGXGlassDirectionalAlbedoSettings\n{\n\tint texture_size_cos_theta_o = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O;\n\tint texture_size_roughness = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS;\n\tint texture_size_ior = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR;\n\tGGXMaskingShadowingFlavor masking_shadowing_term;\n\n\tint integration_sample_count = 65536;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GGXThinGlassDirectionalAlbedoSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_SETTINGS_H\n#define GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_SETTINGS_H\n\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\nstruct GGXThinGlassDirectionalAlbedoSettings\n{\n\tint texture_size_cos_theta_o = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O;\n\tint texture_size_roughness = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS;\n\tint texture_size_ior = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR;\n\tGGXMaskingShadowingFlavor masking_shadowing_term;\n\n\tint integration_sample_count = 65536;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GPUBaker.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/Baker/GPUBaker.h\"\n#include \"Threads/ThreadManager.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nGPUBaker::GPUBaker(std::shared_ptr<GPURenderer> renderer) : m_renderer(renderer) \n{\n\tOROCHI_CHECK_ERROR(oroStreamCreate(&m_bake_stream));\n\tm_compiler_priority_mutex = std::make_shared<std::mutex>();\n\n\tm_ggx_conductor_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GGXConductorDirectionalAlbedo.h\", \"GGXConductorDirectionalAlbedoBake\", \"GGX conductor directional albedo\");\n\n\tm_ggx_fresnel_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GGXFresnelDirectionalAlbedo.h\", \"GGXFresnelDirectionalAlbedoBake\", \"GGX fresnel directional albedo\");\n\n\tm_glossy_dielectric_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GlossyDielectricDirectionalAlbedo.h\", \"GlossyDielectricDirectionalAlbedoBake\", \"dielectric directional albedo\");\n\n\tm_ggx_glass_entering_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GGXGlassDirectionalAlbedo.h\", \"GGXGlassDirectionalAlbedoBakeEntering\", \"GGX glass directional albedo 1/2\");\n\tm_ggx_glass_exiting_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GGXGlassDirectionalAlbedo.h\", \"GGXGlassDirectionalAlbedoBakeExiting\", \"GGX glass directional albedo 2/2\");\n\n\tm_ggx_thin_glass_directional_albedo_bake_kernel = GPUBakerKernel(m_renderer, m_bake_stream, 
m_compiler_priority_mutex,\n\t\tDEVICE_KERNELS_DIRECTORY \"/Baking/GGXThinGlassDirectionalAlbedo.h\", \"GGXThinGlassDirectionalAlbedoBake\", \"GGX thin glass directional albedo\");\n}\n\nvoid GPUBaker::bake_ggx_conductor_directional_albedo(const GGXConductorDirectionalAlbedoSettings& bake_settings, const std::string& output_filename)\n{\n\tm_ggx_conductor_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta, bake_settings.texture_size_roughness, 1),\n\t\t&bake_settings, bake_settings.integration_sample_count, output_filename);\n}\n\nbool GPUBaker::is_ggx_conductor_directional_albedo_bake_complete() const\n{\n\treturn m_ggx_conductor_directional_albedo_bake_kernel.is_complete();\n}\n\nvoid GPUBaker::bake_ggx_fresnel_directional_albedo(const GGXFresnelDirectionalAlbedoSettings& bake_settings, const std::string& output_filename)\n{\n\tm_ggx_fresnel_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta, bake_settings.texture_size_roughness, bake_settings.texture_size_ior),\n\t\t&bake_settings, bake_settings.integration_sample_count, output_filename);\n}\n\nbool GPUBaker::is_ggx_fresnel_directional_albedo_bake_complete() const\n{\n\treturn m_ggx_fresnel_directional_albedo_bake_kernel.is_complete();\n}\n\nvoid GPUBaker::bake_glossy_dielectric_directional_albedo(const GlossyDielectricDirectionalAlbedoSettings& bake_settings, const std::string& output_filename)\n{\n\tm_glossy_dielectric_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta_o, bake_settings.texture_size_roughness, bake_settings.texture_size_ior),\n\t\t&bake_settings, bake_settings.integration_sample_count, output_filename);\n}\n\nbool GPUBaker::is_glossy_dielectric_directional_albedo_bake_complete() const\n{\n\treturn m_glossy_dielectric_directional_albedo_bake_kernel.is_complete();\n}\n\nvoid GPUBaker::bake_ggx_glass_directional_albedo(const GGXGlassDirectionalAlbedoSettings& 
bake_settings, const std::string& output_filename)\n{\n\tm_ggx_glass_entering_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta_o, bake_settings.texture_size_roughness, bake_settings.texture_size_ior),\n\t\t&bake_settings, bake_settings.integration_sample_count, output_filename);\n\n\tm_ggx_glass_exiting_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta_o, bake_settings.texture_size_roughness, bake_settings.texture_size_ior),\n\t\t&bake_settings, bake_settings.integration_sample_count, \"inv_\" + output_filename);\n}\n\nbool GPUBaker::is_ggx_glass_directional_albedo_bake_complete() const\n{\n\treturn m_ggx_glass_entering_directional_albedo_bake_kernel.is_complete() && m_ggx_glass_exiting_directional_albedo_bake_kernel.is_complete();\n}\n\nvoid GPUBaker::bake_ggx_thin_glass_directional_albedo(const GGXThinGlassDirectionalAlbedoSettings& bake_settings, const std::string& output_filename)\n{\n\tm_ggx_thin_glass_directional_albedo_bake_kernel.bake_internal(\n\t\tmake_int3(bake_settings.texture_size_cos_theta_o, bake_settings.texture_size_roughness, bake_settings.texture_size_ior),\n\t\t&bake_settings, bake_settings.integration_sample_count, output_filename);\n}\n\nbool GPUBaker::is_ggx_thin_glass_directional_albedo_bake_complete() const\n{\n\treturn m_ggx_thin_glass_directional_albedo_bake_kernel.is_complete();\n}\n"
  },
  {
    "path": "src/Renderer/Baker/GPUBaker.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_BAKER_H\n#define GPU_BAKER_H\n\n#include \"Compiler/GPUKernel.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"Image/Image.h\"\n#include \"Renderer/Baker/GlossyDielectricDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXConductorDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXFresnelDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXGlassDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXThinGlassDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GPUBakerKernel.h\"\n#include \"Renderer/GPURenderer.h\"\n\n#include <mutex>\n\nclass GPUBaker\n{\npublic:\n\tGPUBaker(std::shared_ptr<GPURenderer> renderer);\n\n\tvoid bake_ggx_conductor_directional_albedo(const GGXConductorDirectionalAlbedoSettings& bake_settings, const std::string& output_filename);\n\tbool is_ggx_conductor_directional_albedo_bake_complete() const;\n\n\tvoid bake_ggx_fresnel_directional_albedo(const GGXFresnelDirectionalAlbedoSettings& bake_settings, const std::string& output_filename);\n\tbool is_ggx_fresnel_directional_albedo_bake_complete() const;\n\n\tvoid bake_glossy_dielectric_directional_albedo(const GlossyDielectricDirectionalAlbedoSettings& bake_settings, const std::string& output_filename);\n\tbool is_glossy_dielectric_directional_albedo_bake_complete() const;\n\n\tvoid bake_ggx_glass_directional_albedo(const GGXGlassDirectionalAlbedoSettings& bake_settings, const std::string& output_filename);\n\tbool is_ggx_glass_directional_albedo_bake_complete() const;\n\t\n\tvoid bake_ggx_thin_glass_directional_albedo(const GGXThinGlassDirectionalAlbedoSettings& bake_settings, const std::string& output_filename);\n\tbool is_ggx_thin_glass_directional_albedo_bake_complete() const;\n\nprivate:\n\tstd::shared_ptr<GPURenderer> m_renderer = nullptr;\n\n\toroStream_t m_bake_stream;\n\t// Mutex so that 
if we're baking multiple textures at the same time,\n\t// we don't run into issues with the compilers wanting to take the priority\n\t// (over background compiling kernels) at the same time\n\tstd::shared_ptr<std::mutex> m_compiler_priority_mutex;\n\n\tGPUBakerKernel m_ggx_conductor_directional_albedo_bake_kernel;\n\tGPUBakerKernel m_ggx_fresnel_directional_albedo_bake_kernel;\n\tGPUBakerKernel m_glossy_dielectric_directional_albedo_bake_kernel;\n\tGPUBakerKernel m_ggx_glass_entering_directional_albedo_bake_kernel;\n\tGPUBakerKernel m_ggx_glass_exiting_directional_albedo_bake_kernel;\n\tGPUBakerKernel m_ggx_thin_glass_directional_albedo_bake_kernel;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GPUBakerConstants.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_BAKER_CONSTANTS_H\n#define GPU_BAKER_CONSTANTS_H\n\n#ifndef __KERNELCC__\n#include <string>\n#endif\n\n#include \"HostDeviceCommon/BSDFsData.h\"\n\nstruct GPUBakerConstants\n{\n\tstatic const int GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O = 128;\n\tstatic const int GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS = 128;\n\n\tstatic const int GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O = 256;\n\tstatic const int GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS = 256;\n\tstatic const int GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR = 256;\n\n\tstatic const int GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O = 256;\n\tstatic const int GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS = 16;\n\tstatic const int GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR = 128;\n\n\tstatic const int GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O = 32;\n\tstatic const int GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS = 32;\n\tstatic const int GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR = 96;\n\n\tstatic const int GLOSSY_DIELECTRIC_TEXTURE_SIZE_COS_THETA_O = 128;\n\tstatic const int GLOSSY_DIELECTRIC_TEXTURE_SIZE_ROUGHNESS = 64;\n\tstatic const int GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR = 128;\n\n\t// Arbitrary number to limit how much computation we do per bake kernel launch.\n\t// This is to avoid driver timeouts\n\tstatic const int COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH = 100000000;\n\n#ifndef __KERNELCC__\n\t// Not using these on the GPU since they are std::string types: unavailable on the GPU\n\t// and besides, we don't these paths on the GPU, only the texture sizes\n\tstatic std::string get_GGX_conductor_directional_albedo_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = 
GPUBakerConstants::GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O,\n\t\tint texture_size_roughness = GPUBakerConstants::GGX_CONDUCTOR_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? \"Correlated_\" : \"Uncorrelated_\";\n\n\t\treturn \"GGX_Conductor_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \".hdr\";\n\t}\n\n\tstatic std::string get_GGX_fresnel_directional_albedo_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O,\n\t\tint texture_size_roughness = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS,\n\t\tint texture_size_ior = GPUBakerConstants::GGX_FRESNEL_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? \"Correlated_\" : \"Uncorrelated_\";\n\n\t\treturn \"GGX_Fresnel_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \"x\" + std::to_string(texture_size_ior) + \".hdr\";\n\t}\n\n\tstatic std::string get_glossy_dielectric_directional_albedo_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_COS_THETA_O,\n\t\tint texture_size_roughness = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_ROUGHNESS,\n\t\tint texture_size_ior = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? 
\"Correlated_\" : \"Uncorrelated_\";\n\n\t\treturn \"Glossy_Ess_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \"x\" + std::to_string(texture_size_ior) + \".hdr\";\n\t}\n\n\tstatic std::string get_GGX_glass_directional_albedo_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O,\n\t\tint texture_size_roughness = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS,\n\t\tint texture_size_ior = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? \"Correlated_\" : \"Uncorrelated_\";\n\n\t\treturn \"GGX_Glass_Ess_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \"x\" + std::to_string(texture_size_ior) + \".hdr\";\n\t}\n\n\tstatic std::string get_GGX_thin_glass_directional_albedo_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O,\n\t\tint texture_size_roughness = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS,\n\t\tint texture_size_ior = GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? 
\"Correlated_\" : \"Uncorrelated_\";\n\n\t\treturn \"GGX_Thin_Glass_Ess_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \"x\" + std::to_string(texture_size_ior) + \".hdr\";\n\t}\n\n\tstatic std::string get_GGX_glass_directional_albedo_inv_texture_filename(GGXMaskingShadowingFlavor masking_shadowing_term, \n\t\tint texture_size_cos_theta = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O, \n\t\tint texture_size_roughness = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS, \n\t\tint texture_size_ior = GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR)\n\t{\n\t\tstd::string flavor_string = masking_shadowing_term == GGXMaskingShadowingFlavor::HeightCorrelated ? \"Correlated_\" : \"Uncorrelated_\";\n\t\t\n\t\treturn \"inv_GGX_Glass_Ess_\" + flavor_string + std::to_string(texture_size_cos_theta) + \"x\" + std::to_string(texture_size_roughness) + \"x\" + std::to_string(texture_size_ior) + \".hdr\";\n\t}\n#endif\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GPUBakerKernel.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/Baker/GPUBakerKernel.h\"\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n#include \"Threads/ThreadManager.h\"\n\nGPUBakerKernel::GPUBakerKernel(std::shared_ptr<GPURenderer> renderer, oroStream_t bake_stream, std::shared_ptr<std::mutex> compiler_priority_mutex,\n\tconst std::string& kernel_filepath, const std::string& kernel_function, const std::string& kernel_title)\n{\n\tm_renderer = renderer;\n\tm_bake_stream = bake_stream;\n\n\tm_kernel_filepath = kernel_filepath;\n\tm_kernel_function = kernel_function;\n\tm_kernel_title = kernel_title;\n}\n\nvoid GPUBakerKernel::bake_internal(int3 bake_resolution, const void* bake_settings_pointer, int nb_kernel_iterations, std::string output_filename)\n{\n\tm_bake_complete = false;\n\n\tstruct ThreadData\n\t{\n\t\tint3 bake_resolution;\n\t\tconst void* bake_settings_pointer;\n\t\tstd::string output_filename;\n\t};\n\n\t// Allocating that on the heap so that it stays alive for the thread even\n\t// when we return from this function\n\tstd::shared_ptr<ThreadData> thread_data = std::make_shared<ThreadData>();\n\tthread_data->bake_resolution = bake_resolution;\n\tthread_data->bake_settings_pointer = bake_settings_pointer;\n\tthread_data->output_filename = output_filename;\n\n\t// Starting everything on a thread to avoid blocking to UI (during the compilation\n\t// of the kernel mainly)\n\tThreadManager::start_thread(\"kernel_bake_\" + m_kernel_title, [this, thread_data, nb_kernel_iterations] {\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_renderer->get_hiprt_orochi_ctx()->orochi_ctx));\n\n\t\tint3& bake_resolution = thread_data->bake_resolution;\n\t\tconst void* bake_settings_pointer = thread_data->bake_settings_pointer;\n\t\tstd::string& output_filename = thread_data->output_filename;\n\n\t\tif 
(!m_bake_kernel.has_been_compiled())\n\t\t{\n\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"%s\", (\"Compiling \" + m_kernel_title + \" kernel...\").c_str());\n\n\t\t\tm_bake_kernel = GPUKernel(m_kernel_filepath, m_kernel_function);\n\t\t\tm_bake_kernel.compile(m_renderer->get_hiprt_orochi_ctx());\n\t\t}\n\n\t\tm_bake_buffer.resize(bake_resolution.x * bake_resolution.y * bake_resolution.z);\n\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"%s\", (\"Launching \" + m_kernel_title + \" baking...\").c_str());\n\n\t\tint3 tile_size;\n\t\tif (bake_resolution.z > 1)\n\t\t\t// 3D launch\n\t\t\ttile_size = make_int3(4, 4, 4);\n\t\telse\n\t\t\t// 2D launch\n\t\t\ttile_size = make_int3(8, 8, 1);\n\n\t\tfloat kernel_duration;\n\t\toroEvent_t start, stop;\n\t\tOROCHI_CHECK_ERROR(oroEventCreate(&start));\n\t\tOROCHI_CHECK_ERROR(oroEventCreate(&stop));\n\t\tOROCHI_CHECK_ERROR(oroEventRecord(start, m_bake_stream));\n\n\t\t// Zeroing the buffer that we're going to accumulate the bake data into\n\t\tm_bake_buffer.memset_whole_buffer(0);\n\n\t\t// Launching many \"small\" kernels to avoid driver timeouts\n\t\tint iterations_per_kernel = floor(hippt::max(1.0f, (float)GPUBakerConstants::COMPUTE_ELEMENT_PER_BAKE_KERNEL_LAUNCH / (bake_resolution.x * bake_resolution.y * bake_resolution.z)));\n\t\tint nb_kernel_launch = ceil(nb_kernel_iterations / (float)iterations_per_kernel);\n\n\t\tvoid* non_const_setting = const_cast<void*>(bake_settings_pointer);\n\t\tfor (int i = 0; i < nb_kernel_launch; i++)\n\t\t{\n\t\t\t// The current iteration variable is used in the kernel to shuffle the random\n\t\t\t// so that we get different random numbers per each kernel launch\n\t\t\tint current_iteration = i + 1;\n\t\t\tfloat* device_buffer = m_bake_buffer.get_device_pointer();\n\t\t\tvoid* bake_kernel_launch_args[] = { &iterations_per_kernel, &current_iteration, non_const_setting, &device_buffer 
};\n\t\t\tm_bake_kernel.launch_asynchronous_3D(\n\t\t\t\ttile_size.x, tile_size.y, tile_size.z,\n\t\t\t\tbake_resolution.x, bake_resolution.y, bake_resolution.z,\n\t\t\t\tbake_kernel_launch_args, m_bake_stream);\n\t\t}\n\n\t\tOROCHI_CHECK_ERROR(oroEventRecord(stop, m_bake_stream));\n\t\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_bake_stream));\n\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&kernel_duration, start, stop));\n\t\tOROCHI_CHECK_ERROR(oroEventDestroy(start));\n\t\tOROCHI_CHECK_ERROR(oroEventDestroy(stop));\n\n\t\tstd::string unit_suffix = kernel_duration < 1000.0f ? \"ms!\" : \"s!\";\n\t\tkernel_duration = kernel_duration > 1000.0f ? kernel_duration / 1000.0f : kernel_duration;\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"%s\", (m_kernel_title + \" completed in \" + std::to_string(kernel_duration) + unit_suffix).c_str());\n\n\t\tif (bake_resolution.z > 1)\n\t\t{\n\t\t\t// 3D texture\n\t\t\tstd::vector<float> baked_data = m_bake_buffer.download_data();\n\t\t\tfor (int i = 0; i < bake_resolution.z; i++)\n\t\t\t{\n\t\t\t\tImage32Bit image = Image32Bit(baked_data.data() + i * bake_resolution.x * bake_resolution.y, bake_resolution.x, bake_resolution.y, /* nb channels */ 1);\n\n\t\t\t\tstd::string final_filename = std::to_string(i) + output_filename;\n\t\t\t\timage.write_image_hdr(final_filename.c_str(), false);\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// A single 2D image\n\t\t\tstd::vector<float> baked_data = m_bake_buffer.download_data();\n\t\t\tImage32Bit image = Image32Bit(baked_data, bake_resolution.x, bake_resolution.y, 1);\n\t\t\timage.write_image_hdr(output_filename.c_str(), false);\n\t\t}\n\n\n\t\tm_bake_buffer.free();\n\t\tm_bake_complete = true;\n\t});\n\n\tThreadManager::detach_threads(\"kernel_bake_\" + m_kernel_title);\n}\n\nbool GPUBakerKernel::is_complete() const\n{\n\treturn m_bake_complete;\n}\n"
  },
  {
    "path": "src/Renderer/Baker/GPUBakerKernel.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_BAKER_KERNEL_H\n#define GPU_BAKER_KERNEL_H\n\n#include \"Renderer/GPURenderer.h\"\n\n#include <string>\n\nclass GPUBakerKernel\n{\npublic:\n\tGPUBakerKernel() {}\n\tGPUBakerKernel(std::shared_ptr<GPURenderer> renderer, oroStream_t bake_stream, std::shared_ptr<std::mutex> compiler_priority_mutex, \n\t\tconst std::string& kernel_filepath, const std::string& kernel_function, const std::string& kernel_title);\n\n\t/**\n\t * Starts the baking process\n\t */\n\tvoid bake_internal(int3 bake_resolution, const void* bake_settings_pointer, int nb_kernel_iterations, std::string output_filename);\n\n\t/**\n\t * Is the baking process complete?\n\t */\n\tbool is_complete() const;\n\nprivate:\n\tbool m_bake_complete = true;\n\n\tstd::shared_ptr<GPURenderer> m_renderer = nullptr;\n\toroStream_t m_bake_stream = nullptr;\n\n\t// Filepath and function within this file that will be launched\n\t// when the baking of the kernel starts\n\tstd::string m_kernel_filepath = \"\";\n\tstd::string m_kernel_function = \"\";\n\t// String used for logging infos\n\tstd::string m_kernel_title = \"\";\n\t// State for baking GGX conductors directional albedo\n\tGPUKernel m_bake_kernel;\n\n\t// Buffer for holding the baked data\n\tOrochiBuffer<float> m_bake_buffer;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Baker/GlossyDielectricDirectionalAlbedoSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DIELECTRIC_FRESNEL_DIRECTIONAL_ALBEDO_SETTINGS_H\n#define DIELECTRIC_FRESNEL_DIRECTIONAL_ALBEDO_SETTINGS_H\n\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n\nstruct GlossyDielectricDirectionalAlbedoSettings\n{\n\tint texture_size_cos_theta_o = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_COS_THETA_O;\n\tint texture_size_roughness = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_ROUGHNESS;\n\tint texture_size_ior = GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR;\n\tGGXMaskingShadowingFlavor masking_shadowing_term;\n\n\tint integration_sample_count = 131072;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/BoundingVolume.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef BOUNDING_VOLUME_H\n#define BOUNDING_VOLUME_H\n\n#include \"BVHConstants.h\"\n#include \"Renderer/Triangle.h\"\n\n#include <array>\n\nstruct BoundingVolume\n{\n    static const float3 PLANE_NORMALS[BVHConstants::PLANES_COUNT];\n\n    std::array<float, BVHConstants::PLANES_COUNT> _d_near;\n    std::array<float, BVHConstants::PLANES_COUNT> _d_far;\n\n    BoundingVolume()\n    {\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n            _d_near[i] = INFINITY;\n            _d_far[i] = -INFINITY;\n        }\n    }\n\n    static void triangle_volume(const Triangle& triangle, std::array<float, BVHConstants::PLANES_COUNT>& d_near, std::array<float, BVHConstants::PLANES_COUNT>& d_far)\n    {\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n            for (int j = 0; j < 3; j++)\n            {\n                float dist = hippt::dot(BoundingVolume::PLANE_NORMALS[i], float3(triangle[j]));\n\n                d_near[i] = hippt::min(d_near[i], dist);\n                d_far[i] = hippt::max(d_far[i], dist);\n            }\n        }\n    }\n\n    void extend_volume(const std::array<float, BVHConstants::PLANES_COUNT>& d_near, const std::array<float, BVHConstants::PLANES_COUNT>& d_far)\n    {\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n            _d_near[i] = hippt::min(_d_near[i], d_near[i]);\n            _d_far[i] = hippt::max(_d_far[i], d_far[i]);\n        }\n    }\n\n    void extend_volume(const BoundingVolume& volume)\n    {\n        extend_volume(volume._d_near, volume._d_far);\n    }\n\n    void extend_volume(const Triangle& triangle)\n    {\n        std::array<float, BVHConstants::PLANES_COUNT> d_near;\n        std::array<float, BVHConstants::PLANES_COUNT> d_far;\n\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n    
        d_near[i] = INFINITY;\n            d_far[i] = -INFINITY;\n        }\n\n        triangle_volume(triangle, d_near, d_far);\n        extend_volume(d_near, d_far);\n    }\n\n    static bool intersect(const std::array<float, BVHConstants::PLANES_COUNT>& d_near,\n                          const std::array<float, BVHConstants::PLANES_COUNT>& d_far,\n                          const std::array<float, BVHConstants::PLANES_COUNT>& denoms,\n                          const std::array<float, BVHConstants::PLANES_COUNT>& numers)\n    {\n        float t_near = -INFINITY;\n        float t_far = INFINITY;\n\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n            float denom = denoms[i];\n            if (denom == 0.0f)\n                continue;\n\n            //inverse denom to avoid division\n            float d_near_i = (d_near[i] - numers[i]) / denom;\n            float d_far_i = (d_far[i] - numers[i]) / denom;\n            if (denom < 0)\n                std::swap(d_near_i, d_far_i);\n\n            t_near = hippt::max(t_near, d_near_i);\n            t_far = hippt::min(t_far, d_far_i);\n\n            if (t_far < t_near)\n                return false;\n        }\n\n        return true;\n    }\n\n    /**\n     * @params denoms Precomputed denominators\n     */\n    bool intersect(float& t_near, float& t_far, float* denoms, float* numers) const\n    {\n        t_near = -INFINITY;\n        t_far = INFINITY;\n\n        for (int i = 0; i < BVHConstants::PLANES_COUNT; i++)\n        {\n            float denom = denoms[i];\n            if (denom == 0.0f)\n                continue;\n\n            //inverse denom to avoid division\n            float d_near_i = (_d_near[i] - numers[i]) / denom;\n            float d_far_i = (_d_far[i] - numers[i]) / denom;\n            if (denom < 0.0f)\n                std::swap(d_near_i, d_far_i);\n\n            t_near = hippt::max(t_near, d_near_i);\n            t_far = hippt::min(t_far, d_far_i);\n\n            if 
(t_far < t_near)\n                return false;\n        }\n\n        return true;\n    }\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUDataStructures/GBufferCPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef G_BUFFER_CPU_RENDERER_H\n#define G_BUFFER_CPU_RENDERER_H\n\n#include \"Device/includes/RayVolumeState.h\"\n\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n\n#include <vector>\n\n // GBuffer that stores information about the current frame first hit data\nstruct GBufferCPUData\n{\n\tvoid resize(unsigned int new_element_count)\n\t{\n\t\tmaterials.resize(new_element_count);\n\t\tgeometric_normals.resize(new_element_count);\n\t\tshading_normals.resize(new_element_count);\n\t\tprimary_hit_position.resize(new_element_count);\n\t\tfirst_hit_prim_index.resize(new_element_count);\n\t\tcameray_ray_hit.resize(new_element_count);\n\t\tray_volume_states.resize(new_element_count);\n\t}\n\n\tstd::vector<DevicePackedEffectiveMaterial> materials;\n\tstd::vector<Octahedral24BitNormalPadded32b> geometric_normals;\n\tstd::vector<Octahedral24BitNormalPadded32b> shading_normals;\n\tstd::vector<float3> primary_hit_position;\n\tstd::vector<int> first_hit_prim_index;\n\n\tstd::vector<unsigned char> cameray_ray_hit;\n\n\tstd::vector<RayVolumeState> ray_volume_states;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUDataStructures/GMoNCPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_GMON_CPU_DATA_H\n#define RENDERER_GMON_CPU_DATA_H\n\n#include \"Renderer/CPUGPUCommonDataStructures/GMoNCPUGPUCommonData.h\"\n\n/**\n * CPU-side data structure for the implementation of GMoN on the CPU\n *\n * Reference:\n * [1] [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\n */\nstruct GMoNCPUData : public GMoNCPUGPUCommonData\n{\n\tvoid resize(unsigned int render_width, unsigned int render_height)\n\t{\n\t\tsets.resize(render_width * render_height * number_of_sets);\n\n\t\tresult_framebuffer = Image32Bit(render_width, render_height, /* channels */ 3);\n\t}\n\n\t// This is one very big buffer that contains all the sets we accumulate into for GMoN\n\t//\n\t// For example, if GMoNMSets == 5 and a render resolution of 1280x720,\n\t// this is going to be a buffer that is 1280*720*5 elements long\n\tstd::vector<ColorRGB32F> sets;\n\n\t// This is the buffer that contains the G-median of means result of each pixel and this is going\n\t// to be displayed in the viewport instead of the regular framebuffer if GMoN is being used\n\tImage32Bit result_framebuffer;\n\n\tunsigned int number_of_sets = GMoNMSetsCount;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUDataStructures/MaterialPackedSoACPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_CPU_DATA_H\n#define HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_CPU_DATA_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialPackedSoA.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/DevicePackedMaterialSoACPUGPUCommonData.h\"\n\n#define DECLARE_ALL_MEMBERS_STD_TIE                                     \\\n  auto all_members = std::tie(                                          \\\n      normal_map_emission_index, base_color_roughness_metallic_index,   \\\n      roughness_and_metallic_index, anisotropic_specular_index,         \\\n      coat_sheen_index, specular_transmission_index,                    \\\n                                                                        \\\n      flags,                                                            \\\n                                                                        \\\n      emission,                                                         \\\n                                                                        \\\n      base_color_roughness,                                             \\\n                                                                        \\\n      oren_nayar_sigma,                                                 \\\n                                                                        \\\n      metallic_F90_and_metallic, metallic_F82_packed_and_diffuse_transmission,                   \\\n      metallic_F90_falloff_exponent,                                    \\\n      anisotropy_and_rotation_and_second_roughness,                     \\\n                                                                        \\\n      specular_color_and_tint_factor,                                   \\\n      specular_and_darkening_and_coat_roughness, coat_medium_thickness, 
\\\n      coat_and_medium_absorption,                                       \\\n      coat_roughening_darkening_anisotropy_and_rotation, coat_ior,      \\\n                                                                        \\\n      sheen_and_color,                                                  \\\n                                                                        \\\n      ior, absorption_color_packed, absorption_at_distance,             \\\n                                                                        \\\n      sheen_roughness_transmission_dispersion_thin_film,                \\\n                                                                        \\\n      dispersion_abbe_number, thin_film_ior, thin_film_thickness,       \\\n      thin_film_kappa_3, thin_film_base_ior_override,                   \\\n      alpha_thin_film_hue_dielectric_priority);\n\n/**\n * These two structures here are just there to hold all the buffers created on the CPU\n * \n * The device pointers of these buffers are then set on to the RenderData of the CPU\n * \n * For a documentation of what's packed into the members ('specular_and_darkening_and_coat_roughness' for example),\n * see the 'DevicePackedEffectiveMaterialSoA' class\n */\n\nstruct DevicePackedEffectiveMaterialSoACPUData : public DevicePackedMaterialSoACPUGPUCommonData\n{\n    std::vector<UChar8BoolsPacked> flags;\n\n    std::vector<ColorRGB32F> emission;\n\n    std::vector<ColorRGB24bFloat0_1Packed> base_color_roughness;\n\n    std::vector<float> oren_nayar_sigma;\n \n    std::vector<ColorRGB24bFloat0_1Packed> metallic_F90_and_metallic;\n    std::vector<ColorRGB24bFloat0_1Packed> metallic_F82_packed_and_diffuse_transmission;\n    std::vector<float> metallic_F90_falloff_exponent;\n    std::vector<Float4xPacked> anisotropy_and_rotation_and_second_roughness;\n\n    std::vector<ColorRGB24bFloat0_1Packed> specular_color_and_tint_factor;\n    std::vector<Float4xPacked> 
specular_and_darkening_and_coat_roughness;\n    std::vector<float> coat_medium_thickness;\n    std::vector<ColorRGB24bFloat0_1Packed> coat_and_medium_absorption;\n    std::vector<Float4xPacked> coat_roughening_darkening_anisotropy_and_rotation;\n    std::vector<float> coat_ior;\n\n    std::vector<ColorRGB24bFloat0_1Packed> sheen_and_color;\n\n    std::vector<float> ior;\n    std::vector<ColorRGB24bFloat0_1Packed> absorption_color_packed;\n    std::vector<float> absorption_at_distance;\n\n    std::vector<Float4xPacked> sheen_roughness_transmission_dispersion_thin_film;\n\n    std::vector<float> dispersion_abbe_number;\n    std::vector<float> thin_film_ior;\n    std::vector<float> thin_film_thickness;\n    std::vector<float> thin_film_kappa_3;\n    std::vector<float> thin_film_base_ior_override;\n    std::vector<Float2xUChar2xPacked> alpha_thin_film_hue_dielectric_priority;\n};\n\nstruct DevicePackedTexturedMaterialSoACPUData : public DevicePackedEffectiveMaterialSoACPUData\n{\n    std::vector<Uint2xPacked> normal_map_emission_index;\n    std::vector<Uint2xPacked> base_color_roughness_metallic_index;\n    std::vector<Uint2xPacked> roughness_and_metallic_index;\n    std::vector<Uint2xPacked> anisotropic_specular_index;\n    std::vector<Uint2xPacked> coat_sheen_index;\n    std::vector<Uint2xPacked> specular_transmission_index;\n\n    // Resize function using the generic for_each_member\n    void resize(size_t new_element_count)\n    {\n        m_element_count = new_element_count;\n\n        // This declares a std::tie of all the buffers\n        DECLARE_ALL_MEMBERS_STD_TIE;\n\n        // Function that will be applied to all the buffers to resize them\n        auto resize_lambda_function = [new_element_count](auto& buffer) { buffer.resize(new_element_count); };\n\n        // Applying the resize function to all the buffers\n        std::apply([&](auto&... 
args) { (resize_lambda_function(args), ...); }, all_members);\n    }\n\n    void upload_data(std::vector<DevicePackedTexturedMaterial>& gpu_packed_materials)\n    {\n        DevicePackedTexturedMaterial* data = gpu_packed_materials.data();\n        size_t element_count = gpu_packed_materials.size();\n\n        // Textured part\n        normal_map_emission_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, normal_map_emission_index), element_count);\n        base_color_roughness_metallic_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, base_color_roughness_metallic_index), element_count);\n        roughness_and_metallic_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, roughness_and_metallic_index), element_count);\n        anisotropic_specular_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, anisotropic_specular_index), element_count);\n        coat_sheen_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, coat_sheen_index), element_count);\n        specular_transmission_index = expand_from_gpu_packed_materials<Uint2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, specular_transmission_index), element_count);\n\n        // Non textured parameters\n        flags = expand_from_gpu_packed_materials<UChar8BoolsPacked>(0, data, offsetof(DevicePackedTexturedMaterial, flags), element_count);\n\n        emission = expand_from_gpu_packed_materials<ColorRGB32F>(0, data, offsetof(DevicePackedTexturedMaterial, emission), element_count);\n\n        base_color_roughness = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, base_color_roughness), element_count);\n\n        oren_nayar_sigma = expand_from_gpu_packed_materials<float>(0, data, 
offsetof(DevicePackedTexturedMaterial, oren_nayar_sigma), element_count);\n\n        metallic_F90_and_metallic = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, metallic_F90_and_metallic), element_count);\n        metallic_F82_packed_and_diffuse_transmission = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, metallic_F82_packed_and_diffuse_transmission), element_count);\n        metallic_F90_falloff_exponent = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, metallic_F90_falloff_exponent), element_count);\n        anisotropy_and_rotation_and_second_roughness = expand_from_gpu_packed_materials<Float4xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, anisotropy_and_rotation_and_second_roughness), element_count);\n\n        specular_color_and_tint_factor = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, specular_color_and_tint_factor), element_count);\n        specular_and_darkening_and_coat_roughness = expand_from_gpu_packed_materials<Float4xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, specular_and_darkening_and_coat_roughness), element_count);\n        coat_medium_thickness = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, coat_medium_thickness), element_count);\n        coat_and_medium_absorption = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, coat_and_medium_absorption), element_count);\n        coat_roughening_darkening_anisotropy_and_rotation = expand_from_gpu_packed_materials<Float4xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, coat_roughening_darkening_anisotropy_and_rotation), element_count);\n        coat_ior = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, coat_ior), 
element_count);\n\n        sheen_and_color = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, sheen_and_color), element_count);\n\n        ior = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, ior), element_count);\n        absorption_color_packed = expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(0, data, offsetof(DevicePackedTexturedMaterial, absorption_color_packed), element_count);\n        absorption_at_distance = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, absorption_at_distance), element_count);\n\n        sheen_roughness_transmission_dispersion_thin_film = expand_from_gpu_packed_materials<Float4xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, sheen_roughness_transmission_dispersion_thin_film), element_count);\n\n        dispersion_abbe_number = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, dispersion_abbe_number), element_count);\n        thin_film_ior = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, thin_film_ior), element_count);\n        thin_film_thickness = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, thin_film_thickness), element_count);\n        thin_film_kappa_3 = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, thin_film_kappa_3), element_count);\n        thin_film_base_ior_override = expand_from_gpu_packed_materials<float>(0, data, offsetof(DevicePackedTexturedMaterial, thin_film_base_ior_override), element_count);\n        alpha_thin_film_hue_dielectric_priority = expand_from_gpu_packed_materials<Float2xUChar2xPacked>(0, data, offsetof(DevicePackedTexturedMaterial, alpha_thin_film_hue_dielectric_priority), element_count);\n    }\n\n    DevicePackedTexturedMaterialSoA get_device_SoA_struct()\n    {\n        
DevicePackedTexturedMaterialSoA out;\n\n        out.normal_map_emission_index = normal_map_emission_index.data();\n        out.base_color_roughness_metallic_index = base_color_roughness_metallic_index.data();\n        out.roughness_and_metallic_index = roughness_and_metallic_index.data();\n        out.anisotropic_specular_index = anisotropic_specular_index.data();\n        out.coat_sheen_index = coat_sheen_index.data();\n        out.specular_transmission_index = specular_transmission_index.data();\n\n        out.flags = flags.data();\n\n        out.emission = emission.data();\n\n        out.base_color_roughness = base_color_roughness.data();\n\n        out.oren_nayar_sigma = oren_nayar_sigma.data();\n\n        out.metallic_F90_and_metallic = metallic_F90_and_metallic.data();\n        out.metallic_F82_packed_and_diffuse_transmission = metallic_F82_packed_and_diffuse_transmission.data();\n        out.metallic_F90_falloff_exponent = metallic_F90_falloff_exponent.data();\n        out.anisotropy_and_rotation_and_second_roughness = anisotropy_and_rotation_and_second_roughness.data();\n\n        out.specular_color_and_tint_factor = specular_color_and_tint_factor.data();\n        out.specular_and_darkening_and_coat_roughness = specular_and_darkening_and_coat_roughness.data();\n        out.coat_medium_thickness = coat_medium_thickness.data();\n        out.coat_and_medium_absorption = coat_and_medium_absorption.data();\n        out.coat_roughening_darkening_anisotropy_and_rotation = coat_roughening_darkening_anisotropy_and_rotation.data();\n        out.coat_ior = coat_ior.data();\n\n        out.sheen_and_color = sheen_and_color.data();\n\n        out.ior = ior.data();\n        out.absorption_color_packed = absorption_color_packed.data();\n        out.absorption_at_distance = absorption_at_distance.data();\n\n        out.sheen_roughness_transmission_dispersion_thin_film = sheen_roughness_transmission_dispersion_thin_film.data();\n\n        out.dispersion_abbe_number = 
dispersion_abbe_number.data();\n        out.thin_film_ior = thin_film_ior.data();\n        out.thin_film_thickness = thin_film_thickness.data();\n        out.thin_film_kappa_3 = thin_film_kappa_3.data();\n        out.thin_film_base_ior_override = thin_film_base_ior_override.data();\n        out.alpha_thin_film_hue_dielectric_priority = alpha_thin_film_hue_dielectric_priority.data();\n\n        return out;\n    }\n\n    size_t m_element_count = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUDataStructures/NEEPlusPlusCPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_NEE_PLUS_PLUS_CPU_DATA_H\n#define RENDERER_NEE_PLUS_PLUS_CPU_DATA_H\n\n// For int3 and AtomicType\n#include \"HostDeviceCommon/Math.h\"\n\n#include <vector>\n\nstruct NEEPlusPlusCPUData\n{\n\tint frame_timer_before_visibility_map_update = 1;\n\n\tstd::vector<AtomicType<unsigned int>> total_unoccluded_rays;\n\tstd::vector<AtomicType<unsigned int>> total_num_rays;\n\n\tstd::vector<AtomicType<unsigned int>> num_rays_staging;\n\tstd::vector<AtomicType<unsigned int>> unoccluded_rays_staging;\n\n\tstd::vector<AtomicType<unsigned int>> checksum_buffer;\n\n\tAtomicType<unsigned long long int> total_shadow_ray_queries;\n\tAtomicType<unsigned long long int> shadow_rays_actually_traced;\n\tAtomicType<unsigned int> total_cell_alive_count;\n};\n\n#endif"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/DevicePackedMaterialSoACPUGPUCommonData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_DEVICE_PACKED_MATERIAL_SOA_CPU_GPU_COMMON_DATA_H\n#define RENDERER_DEVICE_PACKED_MATERIAL_SOA_CPU_GPU_COMMON_DATA_H\n\nstruct DevicePackedMaterialSoACPUGPUCommonData\n{\n    /**\n     * Takes a pointer to some 'DevicePackedTexturedMaterial' in the 'gpu_packed_materials' array (which could be std::vector().data() for example)\n     * and returns a vector of type T that contains 'element_count' elements at offset 'offset' of the 'DevicePackedTexturedMaterial' structure\n     *\n     * For example:\n     * expand_from_gpu_packed_materials<Uint2xPacked>(3, gpu_packed_materials, offsetof(DevicePackedTexturedMaterial, normal_map_emission_index), 2)\n     *\n     * return an std::vector that contains the 'normal_map_emission_index' of gpu_packed_materials[3] and gpu_packed_materials[4]\n     */\n    template <typename T>\n    std::vector<T> expand_from_gpu_packed_materials(unsigned int start_index, const DevicePackedTexturedMaterial* gpu_packed_materials, size_t offset_in_struct, size_t element_count)\n    {\n        std::vector<T> out(element_count);\n\n        for (int i = 0; i < element_count; i++)\n            out[i] = *reinterpret_cast<const T*>(reinterpret_cast<const char*>(&gpu_packed_materials[start_index + i]) + offset_in_struct);\n\n        return out;\n    }\n};\n\n#endif"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/GMoNCPUGPUCommonData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_GMON_CPU_GPU_COMMON_DATA_H\n#define RENDERER_GMON_CPU_GPU_COMMON_DATA_H\n\n#include \"HostDeviceCommon/KernelOptions/GMoNOptions.h\"\n\nstruct GMoNCPUGPUCommonData\n{\n\t// Whether or not GMoN is actively being used\n\tbool using_gmon = HIPRTRenderSettings::DEBUG_DEV_GMON_BLEND_WEIGHTS;\n\n\t// How much to blend between the non-GMoN output and the GMoN output\n\tfloat gmon_blend_factor = 0.0f;\n\tbool gmon_auto_blend_factor = true;\n\n\tint2 current_resolution = make_int2(1280, 720);\n\tunsigned int current_number_of_sets = GMoNMSetsCount;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/GenericSoA.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_GENERIC_SOA_H\n#define RENDERER_GENERIC_SOA_H\n\n#include <cstddef>\n#include <tuple>\n#include <type_traits>\n#include <utility>\n\n#include \"HostDeviceCommon/AtomicType.h\"\n\ntemplate<typename T, template<typename> class Container>\nusing GenericAtomicType = typename std::conditional_t<std::is_same<Container<T>, std::vector<T>>::value, AtomicType<T>, T>;\n\n// Helper to detect std::atomic<...>\n//\n// std::false_type and std::true_type are structures that\n// have ::value equal to 'false' or ::value equal to 'true' respectively\n//\n// By inheriting from std::false_type or std::true_type, we can check at compile time\n// what's our ::value and use a constexpr if() on that\ntemplate<typename T>\nstruct IsStdAtomic : std::false_type {};\n\ntemplate<typename U>\nstruct IsStdAtomic<std::atomic<U>> : std::true_type {};\n\n/**\n * Can be used to create a structure of arrays for multiple buffers of different types.\n * \n * For example, to declare an SoA of 3 buffers: float, float and int, this can be used as:\n * \n * GenericSoA<std::vector, float, float, int> if the SoA is meant to be used on the CPU (std::vector)\n * GenericSoA<std::vector, float, float, int> if the SoA is meant to be used on the GPU (OrochiBuffer)\n * \n * The 'Container' type must support the following operations:\n *      - resize(int new_element_count) -> resizes the container to hold new_element_count elements\n *      - size() -> returns the number of elements in the container\n */\ntemplate<\n    template<typename> class Container,\n    typename... 
Ts>\nstruct GenericSoA\n{\n    template <typename T>\n    using BufferTypeFromVariable = typename std::decay_t<T>::value_type;\n\n    template <int bufferIndex>\n    using BufferTypeFromIndex = typename std::tuple_element<bufferIndex, std::tuple<Container<Ts>...>>::type::value_type;\n\n    using IsCPUBuffer = std::is_same<Container<BufferTypeFromIndex<0>>, std::vector<BufferTypeFromIndex<0>>>;\n\n    void resize(std::size_t new_element_count)\n    {\n        // Applies resize(new_element_count) on each buffer in the tuple\n        std::apply([this, new_element_count](auto&... buffer) \n        { \n            (resize_buffer_internal(buffer, new_element_count), ...);\n        }, buffers);\n    }\n\n    std::size_t get_byte_size() const\n    {\n        std::size_t total = 0;\n\n        // For each container, add sizeof(value_type) * size()\n        std::apply([&](auto const&... buffer) \n        {\n            ((total += buffer.size() * sizeof(BufferTypeFromVariable<decltype(buffer)>)), ...);\n        }, buffers);\n\n        return total;\n    }\n\n\tunsigned int size() const\n\t{\n        return std::get<0>(buffers).size();\n\t}\n\n    template<int bufferIndex>\n    void memset_buffer(BufferTypeFromIndex<bufferIndex> memset_value)\n    {\n        if constexpr (IsCPUBuffer::value)\n        {\n            if constexpr (IsStdAtomic<BufferTypeFromIndex<bufferIndex>>::value)\n            {\n                // For atomic types, we have to store into them with a loop because they do not have an =operator()\n                // so we can't use std::fill\n                for (auto& value : get_buffer<bufferIndex>())\n                    value.store(memset_value);\n            }\n            else\n                std::fill(get_buffer<bufferIndex>().begin(), get_buffer<bufferIndex>().end(), memset_value);\n        }\n        else\n        {\n            std::vector<BufferTypeFromIndex<bufferIndex>> data(size(), memset_value);\n            
get_buffer<bufferIndex>().upload_data(data);\n        }\n    }\n\n    template<int bufferIndex>\n    auto& get_buffer()\n    {\n        return std::get<bufferIndex>(buffers);\n    }\n\n    template<int bufferIndex>\n    auto* get_buffer_data_ptr()\n    {\n        return std::get<bufferIndex>(buffers).data();\n    }\n\n    template<int bufferIndex>\n    auto* get_buffer_data_atomic_ptr()\n    {\n        if constexpr (IsCPUBuffer::value)\n\t\t\treturn std::get<bufferIndex>(buffers).data();\n        else\n            // For the GPU, calling the 'get_atomic_device_pointer' of OrochiBuffer\n            return std::get<bufferIndex>(buffers).get_atomic_device_pointer();\n    }\n\n    template <int bufferIndex>\n    void upload_to_buffer(const std::vector<BufferTypeFromIndex<bufferIndex>>& data)\n    {\n        if constexpr (IsCPUBuffer::value)\n        {\n            // If our main container type for this SoA is std::vector (i.e. this is for the CPU), then we're uploading\n            // to the buffer simply by copying\n            get_buffer<bufferIndex>() = data;\n        }\n        else\n        {\n            // If our main container type for this SoA is OrochiBuffer (i.e. this is for the GPU), then we're uploading\n            // to the buffer by uploading to the GPU\n            get_buffer<bufferIndex>().upload_data(data);\n        }\n    }\n\n    void free()\n    {\n        // Applies clear() on each buffer in the tuple\n        std::apply([](auto&... 
buffer)\n        {\n            // decltype here gives us the exact type of 'buffer' which can be std::vector<float>& for example,\n            // **with** the reference type\n            //\n            // But we want to clear the buffer by overriding it with a newly instantiated buffer so we don't want\n            // the reference, hence the use of std::decay_t\n            ((buffer = std::decay_t<decltype(buffer)>{}), ...);\n        }, buffers);\n    }\n\nprivate:\n    template <typename BufferType>\n    void resize_buffer_internal(BufferType& buffer, std::size_t new_element_count)\n    {\n        if constexpr (IsStdAtomic<typename BufferType::value_type>::value)\n            // If the buffer is a buffer of std::atomic on the CPU, we cannot use resize\n            // (because std::atomic are missing some operators used by\n            // std::vector.resize() so we have to recreate the buffer instead\n            buffer = std::decay_t<decltype(buffer)>(new_element_count);\n        else\n            buffer.resize(new_element_count);\n    }\n\n    std::tuple<Container<Ts>...> buffers;\n};\n\nnamespace GenericSoAHelpers\n{\n    template<template<typename> class BufferContainer, typename T, typename U>\n    void memset_buffer(BufferContainer<T>& buffer, U memset_value)\n    {\n        if constexpr (std::is_same_v<BufferContainer<T>, std::vector<T>>)\n        {\n            // std::vector type\n\n            if constexpr (IsStdAtomic<T>::value)\n            {\n                // For atomic types, we have to store into them with a loop because they do not have an =operator()\n                // so we can't use std::fill\n                for (auto& value : buffer)\n                    value.store(memset_value);\n            }\n            else\n                std::fill(buffer.begin(), buffer.end(), memset_value);\n        }\n        else\n        {\n            std::vector<T> data(buffer.size(), memset_value);\n\n            buffer.upload_data(data);\n        }\n    
}\n\n    template<template<typename> class BufferContainer, typename T>\n    void resize(BufferContainer<T>& buffer, std::size_t new_size)\n    {\n        if constexpr (IsStdAtomic<T>::value)\n            // If the buffer is a buffer of std::atomic on the CPU, we cannot use resize\n            // (because std::atomic are missing some operators used by\n            // std::vector.resize() so we have to recreate the buffer instead\n            buffer = std::decay_t<decltype(buffer)>(new_size);\n        else\n            buffer.resize(new_size);\n    }\n}\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/PrecomputedEmissiveTrianglesDataSoAHost.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_PERCOMPUTED_EMISSIVE_TRIANGLE_DATA_SOA_HOST_H\n#define RENDERER_PERCOMPUTED_EMISSIVE_TRIANGLE_DATA_SOA_HOST_H\n\n#include \"HostDeviceCommon/PrecomputedEmissiveTrianglesDataSoADevice.h\"\n\n#include \"Renderer/CPUGPUCommonDataStructures/GenericSoA.h\"\n\ntemplate <template <typename> typename DataContainer>\nusing PrecomputedEmissiveTrianglesDataSoAHost = GenericSoA<DataContainer, float3, float3, float3>;\n\nnamespace PrecomputedEmissiveTrianglesDataSoAHostHelpers\n{\n\tenum\n\t{\n\t\tVERTEX_A_BUFFER,\n\t\tAB_BUFFER,\n\t\tAC_BUFFER,\n\t};\n\n\ttemplate <template <typename> typename DataContainer>\n\tPrecomputedEmissiveTrianglesDataSoADevice to_device(PrecomputedEmissiveTrianglesDataSoAHost<DataContainer>& petd_host)\n\t{\n\t\tPrecomputedEmissiveTrianglesDataSoADevice petd_device;\n\n\t\tpetd_device.triangles_A = petd_host.template get_buffer_data_ptr<VERTEX_A_BUFFER>();\n\t\tpetd_device.triangles_AB = petd_host.template get_buffer_data_ptr<AB_BUFFER>();\n\t\tpetd_device.triangles_AC = petd_host.template get_buffer_data_ptr<AC_BUFFER>();\n\n\t\treturn petd_device;\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/ReGIRGridBufferSoAHost.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_REGIR_RESERVOIR_SOA_CPU_GPU_H\n#define RENDERER_REGIR_RESERVOIR_SOA_CPU_GPU_H\n\n#include \"Device/includes/ReSTIR/ReGIR/ReservoirSoA.h\"\n#include \"HostDeviceCommon/Packing.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/GenericSoA.h\"\n\ntemplate <template <typename> typename DataContainer>\n// using ReGIRSampleSoAHost = GenericSoA<DataContainer, int, unsigned int>;\nusing ReGIRSampleSoAHost = GenericSoA<DataContainer, int, float3>;\n\ntemplate <template <typename> typename DataContainer>\nusing ReGIRReservoirSoAHost = GenericSoA<DataContainer, float>;\n\nenum ReGIRSampleSoAHostBuffers\n{\n\tREGIR_SAMPLE_EMISSIVE_TRIANGLE_INDEX,\n\t// REGIR_SAMPLE_RANDOM_SEED\n\tREGIR_SAMPLE_POINT_ON_LIGHT\n};\n\nenum ReGIRReservoirSoAHostBuffers\n{\n\tREGIR_RESERVOIR_UCW\n};\n\ntemplate <template <typename> typename DataContainer>\nstruct ReGIRGridBufferSoAHost\n{\n\tvoid resize(int new_element_count)\n\t{\n\t\tsamples.resize(new_element_count);\n\t\treservoirs.resize(new_element_count);\n\t}\n\n\tvoid free()\n\t{\n\t\tsamples.free();\n\t\treservoirs.free();\n\t}\n\n\tstd::size_t get_byte_size() const\n\t{\n\t\treturn samples.get_byte_size() + reservoirs.get_byte_size();\n\t}\n\n\tunsigned int size() const\n\t{\n\t\treturn samples.size();\n\t}\n\n\tReGIRSampleSoAHost<DataContainer> samples;\n\tReGIRReservoirSoAHost<DataContainer> reservoirs;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/ReGIRHashCellDataSoAHost.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_REGIR_HASH_CELL_DATA_SOA_CPU_GPU_H\n#define RENDERER_REGIR_HASH_CELL_DATA_SOA_CPU_GPU_H\n\n#include \"Device/includes/ReSTIR/ReGIR/HashGridCellData.h\"\n\n#include \"HostDeviceCommon/Packing.h\"\n\n#include \"Renderer/CPUGPUCommonDataStructures/GenericSoA.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRGridBufferSoAHost.h\"\n\ntemplate <template <typename> typename DataContainer>\nusing ReGIRHashCellDataSoAHostInternal = GenericSoA<DataContainer, \n\tGenericAtomicType<int, DataContainer>,  // primitive index\n\tfloat3, // world points\n\tOctahedral24BitNormalPadded32b,  // world normals\n\tunsigned char,  // roughness\n\tunsigned char,  // metallic\n\tunsigned char,  // specular\n\tGenericAtomicType<unsigned int, DataContainer>, // hash keys\n\tGenericAtomicType<unsigned int, DataContainer>, // grid cells alive\n\tunsigned int // grid cells alive list\n>;\n\nenum ReGIRHashCellDataSoAHostBuffers\n{\n\tREGIR_HASH_CELL_PRIM_INDEX,\n\tREGIR_HASH_CELL_POINTS,\n\tREGIR_HASH_CELL_NORMALS,\n\tREGIR_HASH_CELL_ROUGHNESS,\n\tREGIR_HASH_CELL_METALLIC,\n\tREGIR_HASH_CELL_SPECULAR,\n\tREGIR_HASH_CELL_CHECKSUMS,\n\n\tREGIR_HASH_CELLS_ALIVE,\n\tREGIR_HASH_CELLS_ALIVE_LIST\n};\n\ntemplate <template <typename> typename DataContainer>\nstruct ReGIRHashCellDataSoAHost\n{\n\tvoid resize(unsigned int new_number_of_cells)\n\t{\n\t\tnew_number_of_cells = hippt::max(new_number_of_cells, 1u);\n\n\t\tm_hash_cell_data.resize(new_number_of_cells);\n\n\t\tm_hash_cell_data.template memset_buffer<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_CHECKSUMS>(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\t\tm_hash_cell_data.template memset_buffer<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_PRIM_INDEX>(ReGIRHashCellDataSoADevice::UNDEFINED_PRIMITIVE);\n\t\tm_hash_cell_data.template 
memset_buffer<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE>(0u);\n\t\tm_hash_cell_data.template memset_buffer<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE_LIST>(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\n\t\t// resize and memset_buffer static call\n\t\tGenericSoAHelpers::resize<DataContainer>(m_grid_cells_alive_count, 1);\n\t\tGenericSoAHelpers::memset_buffer<DataContainer>(m_grid_cells_alive_count, 0u);\n\t}\n\n\tvoid free()\n\t{\n\t\tm_hash_cell_data.free();\n\t}\n\n\tstd::size_t get_byte_size() const\n\t{\n\t\treturn m_hash_cell_data.get_byte_size();\n\t}\n\n\tunsigned int size() const\n\t{\n\t\treturn m_hash_cell_data.size();\n\t}\n\n\tReGIRHashCellDataSoADevice to_device()\n\t{\n\t\tReGIRHashCellDataSoADevice hash_cell_data;\n\n\t\thash_cell_data.hit_primitive = m_hash_cell_data.template get_buffer_data_atomic_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_PRIM_INDEX>();\n\t\thash_cell_data.world_points = m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_POINTS>();\n\t\thash_cell_data.world_normals = m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_NORMALS>();\n\t\thash_cell_data.roughness = m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_ROUGHNESS>();\n\t\thash_cell_data.metallic = m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_METALLIC>();\n\t\thash_cell_data.specular = m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_SPECULAR>();\n\t\thash_cell_data.checksums = m_hash_cell_data.template get_buffer_data_atomic_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELL_CHECKSUMS>();\n\n\t\thash_cell_data.grid_cell_alive = m_hash_cell_data.template get_buffer_data_atomic_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE>();\n\t\thash_cell_data.grid_cells_alive_list = m_hash_cell_data.template 
get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE_LIST>();\n\n\t\tif constexpr (std::is_same_v<DataContainer<std::atomic<unsigned int>>, std::vector<std::atomic<unsigned int>>>)\n\t\t\t// This buffer is an std::vector so we can just call .data()\n\t\t\thash_cell_data.grid_cells_alive_count = m_grid_cells_alive_count.data();\n\t\telse\n\t\t\t// For the GPU, we need to call .get_atomic_device_pointer()\n\t\t\thash_cell_data.grid_cells_alive_count = m_grid_cells_alive_count.get_atomic_device_pointer();\n\n\t\treturn hash_cell_data;\n\t}\n\n\tReGIRHashCellDataSoAHostInternal<DataContainer> m_hash_cell_data;\n\n\t// Not in the SoA because this buffer's size doesn't follow the size of the other buffers.\n\t//\n\t// This one always just has size 1\n\tDataContainer<GenericAtomicType<unsigned int, DataContainer>> m_grid_cells_alive_count;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/ReGIRHashGridSoAHost.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_REGIR_HASH_GRID_SOA_CPU_GPU_H\n#define RENDERER_REGIR_HASH_GRID_SOA_CPU_GPU_H\n\n#include \"Device/includes/ReSTIR/ReGIR/HashGridSoADevice.h\"\n\n#include \"HostDeviceCommon/Packing.h\"\n\n#include \"Renderer/CPUGPUCommonDataStructures/GenericSoA.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRGridBufferSoAHost.h\"\n\ntemplate <template <typename> typename DataContainer>\nstruct ReGIRHashGridSoAHost\n{\n\tvoid resize(unsigned int new_cell_count, unsigned int reservoirs_per_cell)\n\t{\n\t\tnew_cell_count = hippt::max(new_cell_count, 1u);\n\n\t\tm_total_number_of_cells = new_cell_count;\n\t\tm_reservoirs_per_cell = reservoirs_per_cell;\n\n\t\tsamples.resize(m_total_number_of_cells * reservoirs_per_cell);\n\t\treservoirs.resize(m_total_number_of_cells * reservoirs_per_cell);\n\n\t\tsamples.template memset_buffer<ReGIRSampleSoAHostBuffers::REGIR_SAMPLE_EMISSIVE_TRIANGLE_INDEX>(-1);\n\t\treservoirs.template memset_buffer<ReGIRReservoirSoAHostBuffers::REGIR_RESERVOIR_UCW>(ReGIRReservoir::UNDEFINED_UCW);\n\t}\n\n\tvoid free()\n\t{\n\t\tsamples.free();\n\t\treservoirs.free();\n\n\t\tm_total_number_of_cells = 0;\n\t}\n\n\tstd::size_t get_byte_size() const\n\t{\n\t\treturn samples.get_byte_size() + reservoirs.get_byte_size();\n\t}\n\n\tunsigned int size_reservoirs() const\n\t{\n\t\treturn samples.size();\n\t}\n\n\tvoid to_device(ReGIRHashGridSoADevice& out_soa_device)\n\t{\n\t\tout_soa_device.samples.emissive_triangle_index = samples.template get_buffer_data_ptr<ReGIRSampleSoAHostBuffers::REGIR_SAMPLE_EMISSIVE_TRIANGLE_INDEX>();\n\t\tout_soa_device.samples.point_on_light = samples.template get_buffer_data_ptr<ReGIRSampleSoAHostBuffers::REGIR_SAMPLE_POINT_ON_LIGHT>();\n\n\t\tout_soa_device.reservoirs.UCW = reservoirs.template 
get_buffer_data_ptr<ReGIRReservoirSoAHostBuffers::REGIR_RESERVOIR_UCW>();\n\t\tout_soa_device.reservoirs.number_of_reservoirs_per_cell = m_reservoirs_per_cell;\n\n\t\tout_soa_device.m_total_number_of_cells = m_total_number_of_cells;\n\t}\n\n\tReGIRSampleSoAHost<DataContainer> samples;\n\tReGIRReservoirSoAHost<DataContainer> reservoirs;\n\n\tunsigned int m_total_number_of_cells = 0;\n\tunsigned int m_reservoirs_per_cell = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPUGPUCommonDataStructures/ReGIRPresampledLightsSoAHost.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_REGIR_PRESAMPLED_LIGHTS_SOA_CPU_GPU_H\n#define RENDERER_REGIR_PRESAMPLED_LIGHTS_SOA_CPU_GPU_H\n\n#include \"HostDeviceCommon/Packing.h\"\n\n#include \"Renderer/CPUGPUCommonDataStructures/GenericSoA.h\"\n\ntemplate <template <typename> typename DataContainer>\nusing ReGIRPresampledLightsSoAHostInternal = GenericSoA<DataContainer,\n\tint,  // primitive index\n\tfloat, // light area\n\tfloat3, // point on light\n\tOctahedral24BitNormalPadded32b, // light normals\n\tFloat3xLengthUint10bPacked // emission\n>;\n\nenum ReGIRPresampledLightsSoAHostBuffers\n{\n\tREGIR_PRESAMPLED_LIGHTS_TRIANGLE_INDEX,\n\tREGIR_PRESAMPLED_LIGHTS_LIGHT_AREA,\n\tREGIR_PRESAMPLED_LIGHTS_POINT_ON_LIGHT,\n\tREGIR_PRESAMPLED_LIGHTS_LIGHT_NORMAL,\n\tREGIR_PRESAMPLED_LIGHTS_EMISSION\n};\n\ntemplate <template <typename> typename DataContainer>\nstruct ReGIRPresampledLightsSoAHost\n{\n\tvoid resize(unsigned int new_presampled_lights_count)\n\t{\n\t\tm_hash_cell_data.resize(new_presampled_lights_count);\n\t}\n\n\tvoid free()\n\t{\n\t\tm_hash_cell_data.free();\n\t}\n\n\tstd::size_t get_byte_size() const\n\t{\n\t\treturn m_hash_cell_data.get_byte_size();\n\t}\n\n\tunsigned int size() const\n\t{\n\t\treturn m_hash_cell_data.size();\n\t}\n\n\tvoid to_device(ReGIRPresampledLightsSoADevice& soa_device)\n\t{\n\t\tsoa_device.emissive_triangle_index = m_hash_cell_data.template get_buffer_data_ptr<ReGIRPresampledLightsSoAHostBuffers::REGIR_PRESAMPLED_LIGHTS_TRIANGLE_INDEX>();\n\t\tsoa_device.light_area = m_hash_cell_data.template get_buffer_data_ptr<ReGIRPresampledLightsSoAHostBuffers::REGIR_PRESAMPLED_LIGHTS_LIGHT_AREA>();\n\t\tsoa_device.point_on_light = m_hash_cell_data.template get_buffer_data_ptr<ReGIRPresampledLightsSoAHostBuffers::REGIR_PRESAMPLED_LIGHTS_POINT_ON_LIGHT>();\n\t\tsoa_device.light_normal = m_hash_cell_data.template 
get_buffer_data_ptr<ReGIRPresampledLightsSoAHostBuffers::REGIR_PRESAMPLED_LIGHTS_LIGHT_NORMAL>();\n\t\tsoa_device.emission = m_hash_cell_data.template get_buffer_data_ptr<ReGIRPresampledLightsSoAHostBuffers::REGIR_PRESAMPLED_LIGHTS_EMISSION>();\n\t}\n\n\tReGIRPresampledLightsSoAHostInternal<DataContainer> m_hash_cell_data;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/CPURenderer.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Device/kernels/CameraRays.h\"\n#include \"Device/kernels/Megakernel.h\"\n#include \"Device/kernels/GMoN/GMoNComputeMedianOfMeans.h\"\n#include \"Device/kernels/NEE++/NEEPlusPlusFinalizeAccumulation.h\"\n\n#include \"Device/kernels/ReSTIR/ReGIR/GridFillTemporalReuse.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/GridPrepopulate.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/LightPresampling.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/PreIntegration.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/Rehash.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/SpatialReuse.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/SupersamplingCopy.h\"\n\n#include \"Device/kernels/ReSTIR/DirectionalReuseCompute.h\"\n\n#include \"Device/kernels/ReSTIR/DI/LightsPresampling.h\"\n#include \"Device/kernels/ReSTIR/DI/InitialCandidates.h\"\n#include \"Device/kernels/ReSTIR/DI/TemporalReuse.h\"\n#include \"Device/kernels/ReSTIR/DI/SpatialReuse.h\"\n#include \"Device/kernels/ReSTIR/DI/FusedSpatiotemporalReuse.h\"\n\n#include \"Device/kernels/ReSTIR/GI/InitialCandidates.h\"\n#include \"Device/kernels/ReSTIR/GI/SpatialReuse.h\"\n#include \"Device/kernels/ReSTIR/GI/TemporalReuse.h\"\n#include \"Device/kernels/ReSTIR/GI/Shading.h\"\n\n#include \"Renderer/Baker/GPUBaker.h\"\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n#include \"Renderer/CPURenderer.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/ApplicationSettings.h\"\n\n#include <atomic>\n#include <chrono>\n#include <omp.h>\n\n // If 1, only the pixel at DEBUG_PIXEL_X and DEBUG_PIXEL_Y will be rendered,\n // allowing for fast step into that pixel with the debugger to see what's happening.\n // Otherwise if 0, all pixels of the image are rendered\n#define DEBUG_PIXEL 0\n\n// If 0, the pixel with coordinates (x, y) = (0, 0) is top left corner.\n// If 1, it's bottom left corner.\n// Useful if you're 
using an image viewer to get the the coordinates of \n// the interesting pixel. If that image viewer has its (0, 0) in the top\n// left corner, you'll need to set that DEBUG_FLIP_Y to 0. Set 1 to if\n// you're measuring the coordinates of the pixel with (0, 0) in the bottom left corner\n#define DEBUG_FLIP_Y 0\n\n// Coordinates of the pixel whose neighborhood needs to rendered (useful for algorithms\n// where pixels are not completely independent from each other such as ReSTIR Spatial Reuse).\n// \n// The neighborhood around pixel will be rendered if DEBUG_RENDER_NEIGHBORHOOD is 1.\n#define DEBUG_PIXEL_X 572\n#define DEBUG_PIXEL_Y 346\n\n// Same as DEBUG_FLIP_Y but for the \"other debug pixel\"\n#define DEBUG_OTHER_FLIP_Y 0\n\n// Allows to render the neighborhood around the DEBUG_PIXEL_X/Y but to debug at the location\n// of DEBUG_OTHER_PIXEL_X/Y given below.\n// \n// -1 to disable. If disabled, the pixel at (DEBUG_PIXEL_X, DEBUG_PIXEL_Y) will be debugged\n#define DEBUG_OTHER_PIXEL_X -1\n#define DEBUG_OTHER_PIXEL_Y -1\n\n// If 1, a square of DEBUG_NEIGHBORHOOD_SIZE x DEBUG_NEIGHBORHOOD_SIZE pixels\n// will be rendered around the pixel to debug (given by DEBUG_PIXEL_X and\n// DEBUG_PIXEL_Y). 
The pixel of interest is going to be rendered first so you\n// can just set a breakpoint in the pass of interest and it will break when rendering the\n// pixel that you want to debug.\n// This can be useful when debugging spatial passes such as ReSTIR spatial reusing.\n// If you were only rendering the precise pixel at the given debug coordinates, you\n// wouldn't be able to debug correctly since all the neighborhood wouldn't have been\n// rendered which means no reservoir which means improper rendering\n#define DEBUG_RENDER_NEIGHBORHOOD 1\n// How many pixels to render around the debugged pixel given by the DEBUG_PIXEL_X and\n// DEBUG_PIXEL_Y coordinates\n#define DEBUG_NEIGHBORHOOD_SIZE 200\n\nCPURenderer::CPURenderer(int width, int height) : m_resolution(make_int2(width, height))\n{\n    m_framebuffer = Image32Bit(width, height, 3);\n\n    m_render_data.render_settings.render_resolution = m_resolution;\n\n    // Resizing buffers + initial value\n    m_pixel_active_buffer.resize(width * height, 0);\n    m_denoiser_albedo.resize(width * height, ColorRGB32F(0.0f));\n    m_denoiser_normals.resize(width * height, float3{ 0.0f, 0.0f, 0.0f });\n    m_pixel_sample_count.resize(width * height, 0);\n    m_pixel_converged_sample_count.resize(width * height, 0);\n    m_pixel_squared_luminance.resize(width * height, 0.0f);\n\n\n    unsigned int new_cell_count_primary_hits = ReGIRHashGridStorage::DEFAULT_GRID_CELL_COUNT_PRIMARY_HITS;\n    unsigned int new_cell_count_secondary_hits = ReGIRHashGridStorage::DEFAULT_GRID_CELL_COUNT_SECONDARY_HITS;\n\n    m_regir_state.presampled_lights.resize(m_render_data.render_settings.regir_settings.presampled_lights.get_presampled_light_count());\n\n    m_regir_state.grid_buffer_primary_hit.resize(new_cell_count_primary_hits, m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(true));\n    m_regir_state.spatial_grid_buffer_primary_hit.resize(new_cell_count_primary_hits, 
m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(true));\n    m_regir_state.hash_cell_data_primary_hit.resize(new_cell_count_primary_hits);\n\n    m_regir_state.grid_buffer_secondary_hit.resize(new_cell_count_secondary_hits, m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(false));\n    m_regir_state.spatial_grid_buffer_secondary_hit.resize(new_cell_count_secondary_hits, m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(false));\n    m_regir_state.hash_cell_data_secondary_hit.resize(new_cell_count_secondary_hits);\n\n    m_regir_state.non_canonical_pre_integration_factors_primary_hit = std::vector<AtomicType<float>>(new_cell_count_primary_hits); std::fill(m_regir_state.non_canonical_pre_integration_factors_primary_hit.begin(), m_regir_state.non_canonical_pre_integration_factors_primary_hit.end(), 0.0f);\n    m_regir_state.canonical_pre_integration_factors_primary_hit = std::vector<AtomicType<float>>(new_cell_count_primary_hits); std::fill(m_regir_state.canonical_pre_integration_factors_primary_hit.begin(), m_regir_state.canonical_pre_integration_factors_primary_hit.end(), 0.0f);\n\n    m_regir_state.non_canonical_pre_integration_factors_secondary_hit = std::vector<AtomicType<float>>(new_cell_count_primary_hits); std::fill(m_regir_state.non_canonical_pre_integration_factors_secondary_hit.begin(), m_regir_state.non_canonical_pre_integration_factors_secondary_hit.end(), 0.0f);\n    m_regir_state.canonical_pre_integration_factors_secondary_hit = std::vector<AtomicType<float>>(new_cell_count_primary_hits); std::fill(m_regir_state.canonical_pre_integration_factors_secondary_hit.begin(), m_regir_state.canonical_pre_integration_factors_secondary_hit.end(), 0.0f);\n\n    if (m_render_data.render_settings.regir_settings.supersampling.do_correlation_reduction)\n        m_regir_state.correlation_reduction_grid.resize(new_cell_count_primary_hits, 
m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(true) * m_render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor);\n\n\n\n    m_restir_di_state.initial_candidates_reservoirs.resize(width * height);\n    m_restir_di_state.spatial_output_reservoirs_1.resize(width * height);\n    m_restir_di_state.spatial_output_reservoirs_2.resize(width * height);\n    m_restir_di_state.presampled_lights_buffer.resize(width * height);\n    m_restir_di_state.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n#if ReSTIR_DI_SpatialDirectionalReuseBitCount > 32\n    m_restir_di_state.per_pixel_spatial_reuse_directions_mask_ull.resize(width * height);\n#else\n    m_restir_di_state.per_pixel_spatial_reuse_directions_mask_u.resize(width * height);\n#endif\n    m_restir_di_state.per_pixel_spatial_reuse_radius.resize(width * height);\n\n    m_restir_gi_state.initial_candidates_reservoirs.resize(width * height);\n    m_restir_gi_state.temporal_reservoirs.resize(width * height);\n    m_restir_gi_state.spatial_reservoirs.resize(width * height);\n#if ReSTIR_GI_SpatialDirectionalReuseBitCount > 32\n    m_restir_gi_state.per_pixel_spatial_reuse_directions_mask_ull.resize(width * height);\n#else\n    m_restir_gi_state.per_pixel_spatial_reuse_directions_mask_u.resize(width * height);\n#endif\n    m_restir_gi_state.per_pixel_spatial_reuse_radius.resize(width * height);\n\n    m_g_buffer.resize(width * height);\n    m_g_buffer_prev_frame.resize(width * height);\n\n    setup_brdfs_data();\n    setup_nee_plus_plus();\n    setup_gmon();\n}\n\nvoid CPURenderer::setup_brdfs_data()\n{\n    m_sheen_ltc_params = Image32Bit(reinterpret_cast<float*>(ltc_parameters_table_approximation.data()), 32, 32, 3);\n    m_GGX_conductor_directional_albedo = Image32Bit::read_image_hdr(\"../data/BRDFsData/GGX/\" + GPUBakerConstants::get_GGX_conductor_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing), 1, 
true);\n\n    std::vector<Image32Bit> images(GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR);\n    for (int i = 0; i < GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR; i++)\n    {\n        std::string filename = std::to_string(i) + GPUBakerConstants::get_glossy_dielectric_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n        std::string filepath = \"../data/BRDFsData/GlossyDielectrics/\" + filename;\n        images[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n    }\n    m_glossy_dielectrics_directional_albedo = Image32Bit3D(images);\n\n    images.resize(GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n    for (int i = 0; i < GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n    {\n        std::string filename = std::to_string(i) + GPUBakerConstants::get_GGX_glass_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n        std::string filepath = \"../data/BRDFsData/GGX/Glass/\" + filename;\n        images[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n    }\n    m_GGX_glass_directional_albedo = Image32Bit3D(images);\n\n    for (int i = 0; i < GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n    {\n        std::string filename = std::to_string(i) + GPUBakerConstants::get_GGX_glass_directional_albedo_inv_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n        std::string filepath = \"../data/BRDFsData/GGX/Glass/\" + filename;\n        images[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n    }\n    m_GGX_glass_inverse_directional_albedo = Image32Bit3D(images);\n\n    images.resize(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n    for (int i = 0; i < GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n    {\n        std::string filename = std::to_string(i) + 
GPUBakerConstants::get_GGX_thin_glass_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n        std::string filepath = \"../data/BRDFsData/GGX/Glass/\" + filename;\n        images[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n    }\n    m_GGX_thin_glass_directional_albedo = Image32Bit3D(images);\n}\n\nvoid CPURenderer::setup_nee_plus_plus()\n{\n#if DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE\n    // Only doing if using NEE++ \n\n    m_nee_plus_plus.total_num_rays = std::vector<AtomicType<unsigned int>>(1000000);\n    m_nee_plus_plus.total_unoccluded_rays = std::vector<AtomicType<unsigned int>>(1000000);\n    m_nee_plus_plus.num_rays_staging = std::vector<AtomicType<unsigned int>>(1000000);\n    m_nee_plus_plus.unoccluded_rays_staging = std::vector<AtomicType<unsigned int>>(1000000);\n    m_nee_plus_plus.checksum_buffer = std::vector<AtomicType<unsigned int>>(1000000);\n    for (AtomicType<unsigned int>& checksum : m_nee_plus_plus.checksum_buffer)\n        checksum.store(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\n    m_render_data.nee_plus_plus.m_entries_buffer.total_num_rays = m_nee_plus_plus.total_num_rays.data();\n    m_render_data.nee_plus_plus.m_entries_buffer.total_unoccluded_rays = m_nee_plus_plus.total_unoccluded_rays.data();\n    //m_render_data.nee_plus_plus.m_entries_buffer.num_rays_staging = m_nee_plus_plus.num_rays_staging.data();\n    //m_render_data.nee_plus_plus.m_entries_buffer.unoccluded_rays_staging = m_nee_plus_plus.unoccluded_rays_staging.data();\n    m_render_data.nee_plus_plus.m_entries_buffer.checksum_buffer = m_nee_plus_plus.checksum_buffer.data();\n\n    m_render_data.nee_plus_plus.m_total_number_of_cells = 1000000;\n\n    m_render_data.nee_plus_plus.m_total_shadow_ray_queries = &m_nee_plus_plus.total_shadow_ray_queries;\n    m_render_data.nee_plus_plus.m_shadow_rays_actually_traced = &m_nee_plus_plus.shadow_rays_actually_traced;\n    m_render_data.nee_plus_plus.m_total_cells_alive_count = 
&m_nee_plus_plus.total_cell_alive_count;\n#endif\n}\n\nvoid CPURenderer::setup_gmon()\n{\n    if (m_render_data.render_settings.samples_per_frame < m_gmon.number_of_sets)\n        m_gmon.using_gmon = false;\n\n    if (m_gmon.using_gmon)\n    {\n        m_gmon.resize(m_resolution.x, m_resolution.y);\n        m_render_data.buffers.gmon_estimator.sets = m_gmon.sets.data();\n        m_render_data.buffers.gmon_estimator.result_framebuffer = m_gmon.result_framebuffer.get_data_as_ColorRGB32F();\n    }\n}\n\nvoid CPURenderer::nee_plus_plus_memcpy_accumulation(int frame_number)\n{\n#if DirectLightUseNEEPlusPlus == KERNEL_OPTION_TRUE\n    bool enough_frames_passed = frame_number % m_nee_plus_plus.frame_timer_before_visibility_map_update == 0;\n    bool not_updating_vis_map_anymore = !m_render_data.nee_plus_plus.m_update_visibility_map;\n    if (!enough_frames_passed || not_updating_vis_map_anymore)\n        return;\n\n    // Only doing if using NEE++\n    for (int x = 0; x < m_render_data.nee_plus_plus.m_total_number_of_cells; x++)\n        NEEPlusPlusFinalizeAccumulation(m_render_data.nee_plus_plus, x);\n#else\n    // Otherwise, it's a no-op\n#endif\n}\n\nvoid CPURenderer::gmon_check_for_sets_accumulation()\n{\n    if (m_gmon.using_gmon)\n    {\n        m_render_data.buffers.gmon_estimator.next_set_to_accumulate++;\n\n        if (m_render_data.buffers.gmon_estimator.next_set_to_accumulate % m_gmon.number_of_sets == 0)\n        {\n            // We've added 1 sample to each sets of GMoN so we can compute the median of means\n            gmon_compute_median_of_means();\n\n            m_render_data.buffers.gmon_estimator.next_set_to_accumulate = 0;\n        }\n    }\n}\n\nvoid CPURenderer::ReGIR_post_render_update()\n{\n#if DirectLightSamplingBaseStrategy != LSS_BASE_REGIR\n    return;\n#endif\n\n    if (m_render_data.render_settings.regir_settings.supersampling.do_correlation_reduction)\n    {\n        ReGIRHashGridSoADevice to_copy;\n        if 
(m_render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n            to_copy = m_render_data.render_settings.regir_settings.get_actual_spatial_output_reservoirs_grid(true);\n        else\n            to_copy = m_render_data.render_settings.regir_settings.get_initial_reservoirs_grid(true);\n\n#pragma omp parallel for\n        for (int x = 0; x < *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(true).grid_cells_alive_count * m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(true); x++)\n        {\n            ReGIR_Supersampling_Copy(m_render_data, to_copy, x);\n        }\n\n        m_render_data.render_settings.regir_settings.supersampling.correl_reduction_current_grid++;\n        m_render_data.render_settings.regir_settings.supersampling.correl_reduction_current_grid %= m_render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor;\n\n        m_render_data.render_settings.regir_settings.supersampling.correl_frames_available++;\n        m_render_data.render_settings.regir_settings.supersampling.correl_frames_available = hippt::min(m_render_data.render_settings.regir_settings.supersampling.correl_frames_available, m_render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor);\n    }\n}\n\nvoid CPURenderer::set_scene(Scene& parsed_scene)\n{\n    m_render_data.GPU_BVH = nullptr;\n\n    std::vector<DevicePackedTexturedMaterial> gpu_packed_materials;\n    gpu_packed_materials.resize(parsed_scene.materials.size());\n    for (int i = 0; i < parsed_scene.materials.size(); i++)\n        gpu_packed_materials[i] = parsed_scene.materials[i].pack_to_GPU();\n\n    m_gpu_packed_materials.upload_data(gpu_packed_materials);\n    m_render_data.buffers.materials_buffer = m_gpu_packed_materials.get_device_SoA_struct();\n    m_render_data.buffers.material_indices = parsed_scene.material_indices.data();\n\n    // Computing the opaqueness of materials i.e. 
whether or not they are FULLY opaque\n    m_material_opaque.resize(parsed_scene.materials.size());\n    for (int i = 0; i < parsed_scene.materials.size(); i++)\n        m_material_opaque[i] = parsed_scene.material_has_opaque_base_color_texture[i] && parsed_scene.materials[i].alpha_opacity == 1.0f;\n\n    m_render_data.buffers.material_opaque = m_material_opaque.data();\n    m_render_data.buffers.has_vertex_normals = parsed_scene.has_vertex_normals.data();\n    m_render_data.buffers.accumulated_ray_colors = m_framebuffer.get_data_as_ColorRGB32F();\n    m_render_data.buffers.triangles_indices = parsed_scene.triangles_vertex_indices.data();\n    m_render_data.buffers.vertices_positions = parsed_scene.vertices_positions.data();\n    m_render_data.buffers.vertex_normals = parsed_scene.vertex_normals.data();\n    m_render_data.buffers.texcoords = parsed_scene.texcoords.data();\n    m_render_data.buffers.triangles_areas = parsed_scene.triangle_areas.data();\n\n    m_render_data.bsdfs_data.sheen_ltc_parameters_texture = &m_sheen_ltc_params;\n    m_render_data.bsdfs_data.GGX_conductor_directional_albedo = &m_GGX_conductor_directional_albedo;\n    m_render_data.bsdfs_data.glossy_dielectric_directional_albedo = &m_glossy_dielectrics_directional_albedo;\n    m_render_data.bsdfs_data.GGX_glass_directional_albedo = &m_GGX_glass_directional_albedo;\n    m_render_data.bsdfs_data.GGX_glass_directional_albedo_inverse = &m_GGX_glass_inverse_directional_albedo;\n    m_render_data.bsdfs_data.GGX_thin_glass_directional_albedo = &m_GGX_thin_glass_directional_albedo;\n\n    ThreadManager::join_threads(ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY);\n    m_render_data.buffers.material_textures = parsed_scene.textures.data();\n\n    m_render_data.aux_buffers.pixel_active = m_pixel_active_buffer.data();\n    m_render_data.aux_buffers.denoiser_albedo = m_denoiser_albedo.data();\n    m_render_data.aux_buffers.denoiser_normals = m_denoiser_normals.data();\n    
m_render_data.aux_buffers.pixel_sample_count = m_pixel_sample_count.data();\n    m_render_data.aux_buffers.pixel_converged_sample_count = m_pixel_converged_sample_count.data();\n    m_render_data.aux_buffers.pixel_squared_luminance = m_pixel_squared_luminance.data();\n    m_render_data.aux_buffers.still_one_ray_active = &m_still_one_ray_active;\n    m_render_data.aux_buffers.pixel_count_converged_so_far = &m_stop_noise_threshold_count;\n\n    m_render_data.g_buffer.materials = m_g_buffer.materials.data();\n    m_render_data.g_buffer.geometric_normals = m_g_buffer.geometric_normals.data();\n    m_render_data.g_buffer.shading_normals = m_g_buffer.shading_normals.data();\n    m_render_data.g_buffer.primary_hit_position = m_g_buffer.primary_hit_position.data();\n    m_render_data.g_buffer.first_hit_prim_index = m_g_buffer.first_hit_prim_index.data();\n\n    m_render_data.g_buffer_prev_frame.materials = m_g_buffer_prev_frame.materials.data();\n    m_render_data.g_buffer_prev_frame.geometric_normals = m_g_buffer_prev_frame.geometric_normals.data();\n    m_render_data.g_buffer_prev_frame.shading_normals = m_g_buffer_prev_frame.shading_normals.data();\n    m_render_data.g_buffer_prev_frame.primary_hit_position = m_g_buffer_prev_frame.primary_hit_position.data();\n    m_render_data.g_buffer_prev_frame.first_hit_prim_index = m_g_buffer_prev_frame.first_hit_prim_index.data();\n\n\n\n\n\n    m_regir_state.presampled_lights.to_device(m_render_data.render_settings.regir_settings.presampled_lights.presampled_lights_soa);\n\n    m_regir_state.grid_buffer_primary_hit.to_device(m_render_data.render_settings.regir_settings.initial_reservoirs_primary_hits_grid);\n    m_regir_state.spatial_grid_buffer_primary_hit.to_device(m_render_data.render_settings.regir_settings.spatial_output_primary_hits_grid);\n    m_render_data.render_settings.regir_settings.hash_cell_data_primary_hits = m_regir_state.hash_cell_data_primary_hit.to_device();\n\n    
m_regir_state.grid_buffer_secondary_hit.to_device(m_render_data.render_settings.regir_settings.initial_reservoirs_secondary_hits_grid);\n    m_regir_state.spatial_grid_buffer_secondary_hit.to_device(m_render_data.render_settings.regir_settings.spatial_output_secondary_hits_grid);\n    m_render_data.render_settings.regir_settings.hash_cell_data_secondary_hits = m_regir_state.hash_cell_data_secondary_hit.to_device();\n\n    m_regir_state.correlation_reduction_grid.to_device(m_render_data.render_settings.regir_settings.supersampling.correlation_reduction_grid);\n\n    m_render_data.render_settings.regir_settings.non_canonical_pre_integration_factors_primary_hits = m_regir_state.non_canonical_pre_integration_factors_primary_hit.data();\n    m_render_data.render_settings.regir_settings.canonical_pre_integration_factors_primary_hits = m_regir_state.canonical_pre_integration_factors_primary_hit.data();\n\n    m_render_data.render_settings.regir_settings.non_canonical_pre_integration_factors_secondary_hits = m_regir_state.non_canonical_pre_integration_factors_secondary_hit.data();\n    m_render_data.render_settings.regir_settings.canonical_pre_integration_factors_secondary_hits = m_regir_state.canonical_pre_integration_factors_secondary_hit.data();\n\n    m_render_data.render_settings.restir_di_settings.light_presampling.light_samples = m_restir_di_state.presampled_lights_buffer.data();\n    m_render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs = m_restir_di_state.initial_candidates_reservoirs.data();\n    m_render_data.render_settings.restir_di_settings.restir_output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n    m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u = m_restir_di_state.per_pixel_spatial_reuse_directions_mask_u.data();\n    m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull = 
m_restir_di_state.per_pixel_spatial_reuse_directions_mask_ull.data();\n    m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_radius = m_restir_di_state.per_pixel_spatial_reuse_radius.data();\n    m_render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_reuse_hit_rate_total = &m_restir_di_state.spatial_reuse_hit_rate_total;\n    m_render_data.render_settings.restir_di_settings.common_spatial_pass.spatial_reuse_hit_rate_hits = &m_restir_di_state.spatial_reuse_hit_rate_hits;\n\n    m_render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer = m_restir_gi_state.initial_candidates_reservoirs.data();\n    m_render_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs = m_restir_gi_state.initial_candidates_reservoirs.data();\n    m_render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs = m_restir_gi_state.temporal_reservoirs.data();\n    m_render_data.render_settings.restir_gi_settings.spatial_pass.input_reservoirs = m_restir_gi_state.temporal_reservoirs.data();\n    m_render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs = m_restir_gi_state.spatial_reservoirs.data();\n    m_render_data.aux_buffers.restir_gi_reservoir_buffer_1 = m_restir_gi_state.initial_candidates_reservoirs.data();\n    m_render_data.aux_buffers.restir_gi_reservoir_buffer_2 = m_restir_gi_state.spatial_reservoirs.data();\n    m_render_data.aux_buffers.restir_gi_reservoir_buffer_3 = m_restir_gi_state.temporal_reservoirs.data();\n    m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u = m_restir_gi_state.per_pixel_spatial_reuse_directions_mask_u.data();\n    m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull = m_restir_gi_state.per_pixel_spatial_reuse_directions_mask_ull.data();\n    
m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius = m_restir_gi_state.per_pixel_spatial_reuse_radius.data();\n    m_render_data.render_settings.restir_gi_settings.common_spatial_pass.spatial_reuse_hit_rate_total = &m_restir_gi_state.spatial_reuse_hit_rate_total;\n    m_render_data.render_settings.restir_gi_settings.common_spatial_pass.spatial_reuse_hit_rate_hits = &m_restir_gi_state.spatial_reuse_hit_rate_hits;\n\n    ThreadManager::join_threads(ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES);\n    m_render_data.buffers.emissive_triangles_count = parsed_scene.emissive_triangles_primitive_indices.size();\n    m_render_data.buffers.emissive_triangles_primitive_indices = parsed_scene.emissive_triangles_primitive_indices.data();\n    m_render_data.buffers.emissive_triangles_primitive_indices_and_emissive_textures = parsed_scene.emissive_triangles_primitive_indices_and_emissive_textures.data();\n\n    std::cout << \"Building scene's BVH...\" << std::endl;\n    m_triangle_buffer = parsed_scene.get_triangles(parsed_scene.triangles_vertex_indices);\n    m_emissive_triangles_buffer = parsed_scene.get_triangles(parsed_scene.emissive_triangle_vertex_indices);\n\n    m_bvh = std::make_shared<BVH>(&m_triangle_buffer);\n    m_light_bvh = std::make_shared<BVH>(&m_emissive_triangles_buffer);\n\n    m_render_data.cpu_only.bvh = m_bvh.get();\n    m_render_data.cpu_only.light_bvh = m_light_bvh.get();\n\n#if DirectLightSamplingBaseStrategy == LSS_BASE_POWER || (DirectLightSamplingBaseStrategy == LSS_BASE_REGIR && ReGIR_GridFillLightSamplingBaseStrategy == LSS_BASE_POWER)\n    std::cout << \"Building scene's power alias table\" << std::endl;\n    compute_emissives_power_alias_table(parsed_scene);\n#endif\n}\n\nvoid CPURenderer::compute_emissives_power_alias_table(const Scene& scene)\n{\n    ThreadManager::add_dependency(ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE, 
ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES);\n    ThreadManager::start_thread(ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE, [this, &scene]()\n        {\n            auto start = std::chrono::high_resolution_clock::now();\n\n            std::vector<float> power_list(scene.emissive_triangles_primitive_indices.size());\n            float power_sum = 0.0f;\n\n            for (int i = 0; i < scene.emissive_triangles_primitive_indices.size(); i++)\n            {\n                int emissive_triangle_index = scene.emissive_triangles_primitive_indices[i];\n\n                // Computing the area of the triangle\n                float3 vertex_A = scene.vertices_positions[scene.triangles_vertex_indices[emissive_triangle_index * 3 + 0]];\n                float3 vertex_B = scene.vertices_positions[scene.triangles_vertex_indices[emissive_triangle_index * 3 + 1]];\n                float3 vertex_C = scene.vertices_positions[scene.triangles_vertex_indices[emissive_triangle_index * 3 + 2]];\n\n                float3 AB = vertex_B - vertex_A;\n                float3 AC = vertex_C - vertex_A;\n\n                float3 normal = hippt::cross(AB, AC);\n                float length_normal = hippt::length(normal);\n                float triangle_area = 0.5f * length_normal;\n\n                int mat_index = scene.material_indices[emissive_triangle_index];\n                float emission_luminance = scene.materials[mat_index].emission.luminance() * scene.materials[mat_index].emission_strength * scene.materials[mat_index].global_emissive_factor;\n\n                float area_power = emission_luminance * triangle_area;\n\n                power_list[i] = area_power;\n                power_sum += area_power;\n            }\n\n            Utils::compute_alias_table(power_list, power_sum, m_power_alias_table_probas, m_power_alias_table_alias);\n\n            m_render_data.buffers.emissives_power_alias_table.alias_table_alias = m_power_alias_table_alias.data();\n       
     m_render_data.buffers.emissives_power_alias_table.alias_table_probas = m_power_alias_table_probas.data();\n            m_render_data.buffers.emissives_power_alias_table.sum_elements = power_sum;\n            m_render_data.buffers.emissives_power_alias_table.size = scene.emissive_triangles_primitive_indices.size();\n\n            auto stop = std::chrono::high_resolution_clock::now();\n            std::cout << \"Power alias table construction time: \" << std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count() << \"ms\" << std::endl;\n        });\n}\n\nvoid CPURenderer::set_envmap(Image32Bit& envmap_image)\n{\n    ThreadManager::join_threads(ThreadManager::ENVMAP_LOAD_FROM_DISK_THREAD);\n\n    if (envmap_image.width == 0 || envmap_image.height == 0)\n    {\n        m_render_data.world_settings.ambient_light_type = AmbientLightType::UNIFORM;\n\n        std::cout << \"Empty envmap set on the CPURenderer... Defaulting to uniform ambient light type\" << std::endl;\n\n        return;\n    }\n\n    if (EnvmapSamplingStrategy == ESS_BINARY_SEARCH)\n    {\n        m_envmap_cdf = envmap_image.compute_cdf();\n        m_render_data.world_settings.envmap_total_sum = m_envmap_cdf.back();\n    }\n    else if (EnvmapSamplingStrategy == ESS_ALIAS_TABLE)\n    {\n        float total_sum;\n\n        envmap_image.compute_alias_table(m_envmap_alias_table_probas, m_envmap_alias_table_alias, &total_sum);\n        m_render_data.world_settings.envmap_total_sum = total_sum;\n    }\n\n    m_packed_envmap.pack_from(envmap_image);\n    m_render_data.world_settings.envmap = m_packed_envmap.get_data_pointer();\n    m_render_data.world_settings.envmap_width = envmap_image.width;\n    m_render_data.world_settings.envmap_height = envmap_image.height;\n    m_render_data.world_settings.ambient_light_type = AmbientLightType::ENVMAP;\n\n    if (EnvmapSamplingStrategy == ESS_BINARY_SEARCH)\n        m_render_data.world_settings.envmap_cdf = m_envmap_cdf.data();\n    else if 
(EnvmapSamplingStrategy == ESS_ALIAS_TABLE)\n    {\n        m_render_data.world_settings.envmap_alias_table.alias_table_probas = m_envmap_alias_table_probas.data();\n        m_render_data.world_settings.envmap_alias_table.alias_table_alias = m_envmap_alias_table_alias.data();\n        m_render_data.world_settings.envmap_alias_table.sum_elements = m_render_data.world_settings.envmap_total_sum;\n        m_render_data.world_settings.envmap_alias_table.size = envmap_image.width * envmap_image.height;\n    }\n}\n\nvoid CPURenderer::set_camera(Camera& camera)\n{\n    m_camera = camera;\n    m_render_data.current_camera = camera.to_hiprt(m_resolution.x, m_resolution.y);\n}\n\nHIPRTRenderData& CPURenderer::get_render_data()\n{\n    return m_render_data;\n}\n\nHIPRTRenderSettings& CPURenderer::get_render_settings()\n{\n    return m_render_data.render_settings;\n}\n\nImage32Bit& CPURenderer::get_framebuffer()\n{\n    if (m_gmon.using_gmon)\n        return m_gmon.result_framebuffer;\n    else\n        return m_framebuffer;\n}\n\nvoid CPURenderer::render()\n{\n    std::cout << \"CPU rendering...\" << std::endl;\n\n    auto start = std::chrono::high_resolution_clock::now();\n\n    // Using 'samples_per_frame' as the number of samples to render on the CPU\n    for (int frame_number = 1; frame_number <= m_render_data.render_settings.samples_per_frame; frame_number++)\n    {\n        m_render_data.render_settings.do_update_status_buffers = true;\n\n        pre_render_update(frame_number);\n        update_render_data(frame_number);\n\n        camera_rays_pass();\n\n#if DirectLightSamplingBaseStrategy == LSS_BASE_REGIR\n        ReGIR_pass();\n#endif\n\n#if DirectLightSamplingStrategy == LSS_RESTIR_DI\n        // Only doing ReSTIR DI is ReSTIR DI is enabled \n        ReSTIR_DI_pass();\n#endif\n\n#if PathSamplingStrategy == PSS_BSDF\n        tracing_pass();\n#elif PathSamplingStrategy == PSS_RESTIR_GI\n        ReSTIR_GI_pass();\n#endif\n\n        post_sample_update(frame_number);\n\n  
      std::cout << \"Frame \" << frame_number << \": \" << frame_number / static_cast<float>(m_render_data.render_settings.samples_per_frame) * 100.0f << \"%\" << std::endl;\n    }\n\n    auto stop = std::chrono::high_resolution_clock::now();\n    std::cout << std::chrono::duration_cast<std::chrono::milliseconds>(stop - start).count() << \"ms\" << std::endl;\n}\n\nvoid CPURenderer::pre_render_update(int frame_number)\n{\n    // Resetting the status buffers\n    // Uploading false to reset the flag\n    *m_render_data.aux_buffers.still_one_ray_active = false;\n    // Resetting the counter of pixels converged to 0\n    m_render_data.aux_buffers.pixel_count_converged_so_far->store(0);\n\n    if (frame_number > m_render_data.nee_plus_plus.m_stop_update_samples)\n        m_render_data.nee_plus_plus.m_update_visibility_map = false;\n}\n\nvoid CPURenderer::post_sample_update(int frame_number)\n{\n    if (m_render_data.render_settings.accumulate)\n        m_render_data.render_settings.sample_number++;\n    m_render_data.random_number = m_rng.xorshift32();\n    m_render_data.render_settings.need_to_reset = false;\n    // We want the G Buffer of the frame that we just rendered to go in the \"g_buffer_prev_frame\"\n    // and then we can re-use the old buffers of to be filled by the current frame render\n\n    nee_plus_plus_memcpy_accumulation(frame_number);\n    gmon_check_for_sets_accumulation();\n    ReGIR_post_render_update();\n}\n\nvoid CPURenderer::update_render_data(int sample)\n{\n    m_render_data.prev_camera = m_render_data.current_camera;\n    m_render_data.current_camera = m_camera.to_hiprt(m_resolution.x, m_resolution.y);\n}\n\nvoid CPURenderer::reset()\n{\n    m_render_data.render_settings.need_to_reset = true;\n    m_render_data.render_settings.sample_number = 0;\n}\n\nvoid CPURenderer::debug_render_pass(std::function<void(int, int)> render_pass_function)\n{\n    // Center pixel when rendering a neighborhood\n    int center_x = 0;\n    int center_y = 0;\n\n    
// If we want to debug a pixel that is not the center pixel,\n    // the coordinates will be stored there\n    int debug_x = -1;\n    int debug_y = -1;\n\n#if DEBUG_PIXEL\n\n\n#if DEBUG_FLIP_Y\n    center_x = DEBUG_PIXEL_X;\n    center_y = DEBUG_PIXEL_Y;\n\n    debug_x = center_x;\n    debug_y = center_y;\n#else // DEBUG_FLIP_Y\n    center_x = DEBUG_PIXEL_X;\n    center_y = m_resolution.y - DEBUG_PIXEL_Y - 1;\n\n    debug_x = center_x;\n    debug_y = center_y;\n#endif // DEBUG_FLIP_Y\n\n\n#if DEBUG_OTHER_PIXEL_X != -1 && DEBUG_OTHER_PIXEL_Y != -1\n#if DEBUG_OTHER_FLIP_Y\n    debug_x = DEBUG_OTHER_PIXEL_X;\n    debug_y = DEBUG_OTHER_PIXEL_Y;\n#else // DEBUG_OTHER_FLIP_Y\n    debug_x = DEBUG_OTHER_PIXEL_X;\n    debug_y = m_resolution.y - DEBUG_OTHER_PIXEL_Y - 1;\n#endif // DEBUG_OTHER_FLIP_Y\n#endif // DEBUG_OTHER_PIXEL_X != -1 && DEBUG_OTHER_PIXEL_Y != -1\n\n    // Debugging the chosen pixel first\n    render_pass_function(debug_x, debug_y);\n\n#if DEBUG_RENDER_NEIGHBORHOOD\n    // Rendering the neighborhood\n\n#pragma omp parallel for schedule(dynamic)\n    for (int render_y = std::max(0, center_y - DEBUG_NEIGHBORHOOD_SIZE); render_y <= std::min(m_resolution.y - 1, center_y + DEBUG_NEIGHBORHOOD_SIZE); render_y++)\n    {\n        for (int render_x = std::max(0, center_x - DEBUG_NEIGHBORHOOD_SIZE); render_x <= std::min(m_resolution.x - 1, center_x + DEBUG_NEIGHBORHOOD_SIZE); render_x++)\n        {\n            if (render_x == debug_x && render_y == debug_y)\n                // Skipping the pixel that we debugged to avoid rendering it twice\n                continue;\n\n            render_pass_function(render_x, render_y);\n        }\n    }\n#endif // DEBUG_RENDER_NEIGHBORHOOD\n\n#else // DEBUG_PIXEL\n\n#pragma omp parallel for schedule(dynamic)\n    for (int y = 0; y < m_resolution.y; y++)\n    {\n        for (int x = 0; x < m_resolution.x; x++)\n        {\n            if (x == debug_x && y == debug_y)\n                // Skipping the pixel that we debugged to avoid 
rendering it twice\n                continue;\n\n            render_pass_function(x, y);\n        }\n    }\n\n#endif // DEBUG_PIXEL\n}\n\nvoid CPURenderer::nee_plus_plus_cache_visibility_pass()\n{\n    //debug_render_pass([this](int x, int y) {\n    //    NEEPlusPlusCachingPrepass(m_render_data, /* caching sample count */ 8, x, y);\n    //});\n\n    //nee_plus_plus_memcpy_accumulation(/* frame_number */ 0);\n}\n\nvoid CPURenderer::camera_rays_pass()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n    debug_render_pass([this](int x, int y) {\n        CameraRays(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::ReGIR_pass()\n{\n    if (m_render_data.render_settings.sample_number == 0)\n        ReGIR_pre_integration();\n\n    ReGIR_presample_lights();\n\n    ReGIR_grid_fill_pass<false>(true);\n    ReGIR_grid_fill_pass<false>(false);\n\n    m_render_data.render_settings.regir_settings.actual_spatial_output_buffers_primary_hits = ReGIR_spatial_reuse_pass<false>(true);\n    m_render_data.render_settings.regir_settings.actual_spatial_output_buffers_secondary_hits = ReGIR_spatial_reuse_pass<false>(false);\n}\n\nvoid CPURenderer::ReGIR_presample_lights()\n{\n    for (int index = 0; index < m_render_data.render_settings.regir_settings.presampled_lights.get_presampled_light_count(); index++)\n    {\n        ReGIR_Light_Presampling(m_render_data, index);\n    }\n}\n\ntemplate <bool accumulatePreIntegration>\nvoid CPURenderer::ReGIR_grid_fill_pass(bool primary_hit)\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n#pragma omp parallel for\n    for (int index = 0; index < *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_count * m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(primary_hit); index++)\n    {\n        ReGIR_Grid_Fill_Temporal_Reuse<accumulatePreIntegration>(m_render_data, 
m_render_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit), index, *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_count, primary_hit);\n    }\n}\n\ntemplate <bool accumulatePreIntegration>\nReGIRHashGridSoADevice CPURenderer::ReGIR_spatial_reuse_pass(bool primary_hit)\n{\n    if (!m_render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n        return ReGIRHashGridSoADevice();\n\n    ReGIRHashGridSoADevice input_reservoirs = m_render_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit);\n    ReGIRHashGridSoADevice output_reservoirs = m_render_data.render_settings.regir_settings.get_raw_spatial_output_reservoirs_grid(primary_hit);\n\n    for (int i = 0; i < m_render_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_count; i++)\n    {\n        m_render_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_index = i;\n\n#pragma omp parallel for\n        for (int index = 0; index < *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_count * m_render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(primary_hit); index++)\n        {\n            ReGIR_Spatial_Reuse<accumulatePreIntegration>(m_render_data,\n                input_reservoirs,\n                output_reservoirs,\n                m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit),\n                *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_count, primary_hit, index);\n        }\n\n        std::swap(input_reservoirs, output_reservoirs);\n    }\n\n    // Returning the reservoirs into which the spatial reuse pass last output the result\n    //\n    // This is the 'input' buffer and not 'output' because of the std::swap that happens on the last iteration\n    return input_reservoirs;\n}\n\nvoid 
CPURenderer::ReGIR_pre_integration()\n{\n    // 2 iterations: 1 for the primary hits, 1 for the secondary hits\n    for (int i = 0; i < 2; i++)\n    {\n        bool primary_hit = (i == 0);\n\n        unsigned int seed_backup = m_render_data.random_number;\n        unsigned int nb_cells_alive = *m_render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit).grid_cells_alive_count;\n        unsigned int nb_threads = nb_cells_alive;\n\n        for (int iteration = 0; iteration < m_render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS; iteration++)\n        {\n            m_render_data.random_number = m_rng.xorshift32();\n\n            ReGIR_presample_lights();\n            ReGIR_grid_fill_pass<true>(primary_hit);\n            ReGIR_spatial_reuse_pass<true>(primary_hit);\n        }\n\n        m_render_data.random_number = seed_backup;\n    }\n}\n\nvoid CPURenderer::ReSTIR_DI_pass()\n{\n    launch_ReSTIR_DI_presampling_lights_pass();\n    launch_ReSTIR_DI_initial_candidates_pass();\n\n    if (m_render_data.render_settings.restir_di_settings.do_fused_spatiotemporal)\n        // If fused-spatiotemporal\n        // Also not doing it on the very first frame as we would get no samples through\n        launch_ReSTIR_DI_spatiotemporal_reuse_pass();\n    else\n    {\n        if (m_render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n            launch_ReSTIR_DI_temporal_reuse_pass();\n\n        if (m_render_data.render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n            for (int spatial_reuse_pass = 0; spatial_reuse_pass < m_render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes; spatial_reuse_pass++)\n                launch_ReSTIR_DI_spatial_reuse_pass(spatial_reuse_pass);\n    }\n\n    configure_ReSTIR_DI_output_buffer();\n    m_restir_di_state.odd_frame = !m_restir_di_state.odd_frame;\n}\n\nvoid CPURenderer::ReSTIR_GI_pass()\n{\n    
compute_ReSTIR_GI_optimal_spatial_reuse_radii();\n\n    configure_ReSTIR_GI_initial_candidates_pass();\n    launch_ReSTIR_GI_initial_candidates_pass();\n\n    configure_ReSTIR_GI_temporal_reuse_pass();\n    launch_ReSTIR_GI_temporal_reuse_pass();\n\n    for (int i = 0; i < m_render_data.render_settings.restir_gi_settings.common_spatial_pass.number_of_passes; i++)\n    {\n        configure_ReSTIR_GI_spatial_reuse_pass(i);\n        launch_ReSTIR_GI_spatial_reuse_pass();\n    }\n\n    configure_ReSTIR_GI_shading_pass();\n    launch_ReSTIR_GI_shading_pass();\n}\n\nLightPresamplingParameters CPURenderer::configure_ReSTIR_DI_light_presampling_pass()\n{\n    LightPresamplingParameters parameters;\n\n    /**\n     * Parameters specific to the kernel\n     */\n\n     // From all the lights of the scene, how many subsets to presample\n    parameters.number_of_subsets = m_render_data.render_settings.restir_di_settings.light_presampling.number_of_subsets;\n    // How many lights to presample in each subset\n    parameters.subset_size = m_render_data.render_settings.restir_di_settings.light_presampling.subset_size;\n    // Buffer that holds the presampled lights\n    parameters.out_light_samples = m_restir_di_state.presampled_lights_buffer.data();\n\n    // For each presampled light, the probability that this is going to be an envmap sample\n    parameters.envmap_sampling_probability = m_render_data.render_settings.restir_di_settings.initial_candidates.envmap_candidate_probability;\n\n    m_render_data.random_number = m_rng.xorshift32();\n\n    return parameters;\n}\n\nvoid CPURenderer::compute_ReSTIR_DI_optimal_spatial_reuse_radii()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_Directional_Reuse_Compute<false>(m_render_data, x, y,\n            m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u,\n            
m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull,\n            m_render_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_radius);\n        });\n}\n\nvoid CPURenderer::launch_ReSTIR_DI_presampling_lights_pass()\n{\n    if (ReSTIR_DI_DoLightPresampling == KERNEL_OPTION_TRUE)\n    {\n        LightPresamplingParameters launch_parameters = configure_ReSTIR_DI_light_presampling_pass();\n\n        for (int index = 0; index < launch_parameters.number_of_subsets * launch_parameters.subset_size; index++)\n            ReSTIR_DI_LightsPresampling(launch_parameters, m_render_data, index);\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_initial_pass()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n    m_render_data.render_settings.restir_di_settings.light_presampling.light_samples = m_restir_di_state.presampled_lights_buffer.data();\n    m_render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs = m_restir_di_state.initial_candidates_reservoirs.data();\n}\n\nvoid CPURenderer::launch_ReSTIR_DI_initial_candidates_pass()\n{\n    configure_ReSTIR_DI_initial_pass();\n\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_DI_InitialCandidates(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_temporal_pass()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n    m_render_data.render_settings.restir_di_settings.common_temporal_pass.permutation_sampling_random_bits = m_rng.xorshift32();\n\n    // The input of the temporal pass is the output of last frame's\n    // ReSTIR (and also the initial candidates but this is implicit\n    // and \"hardcoded in the shader\"\n    m_render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs = m_render_data.render_settings.restir_di_settings.restir_output_reservoirs;\n\n    if 
(m_render_data.render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n        // If we're going to do spatial reuse, reuse the initial\n        // candidate reservoirs to store the output of the temporal pass.\n        // The spatial reuse pass will read form that buffer.\n        // \n        // Reusing the initial candidates buffer (which is an input\n        // to the temporal pass) as the output is legal and does not\n        // cause a race condition because a given pixel only read and\n        // writes to its own pixel in the initial candidates buffer.\n        // We're not risking another pixel reading in someone else's\n        // pixel in the initial candidates buffer while we write into\n        // it (that would be a race condition)\n        m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = m_restir_di_state.initial_candidates_reservoirs.data();\n    else\n    {\n        // Else, no spatial reuse, the output of the temporal pass is going to be in its own buffer.\n        // Alternatively using spatial_output_reservoirs_1 and spatial_output_reservoirs_2 to avoid race conditions\n        if (m_restir_di_state.odd_frame)\n            m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n        else\n            m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_2.data();\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_temporal_pass_for_fused_spatiotemporal()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n    m_render_data.render_settings.restir_di_settings.common_temporal_pass.permutation_sampling_random_bits = m_rng.xorshift32();\n\n    // The input of the temporal pass is the output of last frame's\n    // ReSTIR (and also the initial candidates but this is implicit\n    // and hardcoded in the shader)\n    if 
(m_restir_di_state.odd_frame)\n        m_render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n    else\n        m_render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs = m_restir_di_state.spatial_output_reservoirs_2.data();\n\n    // Not needed. In the fused spatiotemporal pass, everything is output by the spatial pass\n    m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = nullptr;\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_spatial_pass(int spatial_pass_index)\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n    if (spatial_pass_index == 0)\n    {\n        if (m_render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n            // For the first spatial reuse pass, we hardcode reading from the output of the temporal pass and storing into 'spatial_output_reservoirs_1'\n            m_render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs;\n        else\n            // If there is no temporal reuse pass, using the initial candidates as the input to the spatial reuse pass\n            m_render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = m_render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs;\n\n        m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n    }\n    else\n    {\n        // And then, starting at the second spatial reuse pass, we read from the output of the previous spatial pass and store\n        // in either spatial_output_reservoirs_1 or spatial_output_reservoirs_2, depending on which one isn't the input (we don't\n        // want to store in the same buffers that is used for output because that's a race condition so\n      
  // we're ping-ponging between the two outputs of the spatial reuse pass)\n\n        if ((spatial_pass_index & 1) == 0)\n        {\n            m_render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = m_restir_di_state.spatial_output_reservoirs_2.data();\n            m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n        }\n        else\n        {\n            m_render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n            m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_2.data();\n\n        }\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_spatial_pass_for_fused_spatiotemporal(int spatial_pass_index)\n{\n    if (spatial_pass_index == 0)\n    {\n        // The input of the spatial resampling in the fused spatiotemporal pass is the\n        // temporal buffer of the last frame i.e. 
the input to the temporal pass\n        //\n        // Note, this line of code below assumes that the temporal pass was configured\n        // prior to calling this function such that\n        // 'm_render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs'\n        // is the proper pointer\n        m_render_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = m_render_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs;\n\n        if (m_restir_di_state.odd_frame)\n            m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_2.data();\n        else\n            m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = m_restir_di_state.spatial_output_reservoirs_1.data();\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_spatiotemporal_pass()\n{\n    // The buffers of the temporal pass are going to be configured in the same way\n    configure_ReSTIR_DI_temporal_pass_for_fused_spatiotemporal();\n\n    // But the spatial pass is going to read from the input of the temporal pass i.e. 
the temporal buffer of the last frame, it's not going to read from the output of the temporal pass\n    configure_ReSTIR_DI_spatial_pass_for_fused_spatiotemporal(0);\n}\n\nvoid CPURenderer::configure_ReSTIR_DI_output_buffer()\n{\n    // Keeping in mind which was the buffer used last for the output of the spatial reuse pass as this is the buffer that\n        // we're going to use as the input to the temporal reuse pass of the next frame\n    if (m_render_data.render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n        // If there was spatial reuse, using the output of the spatial reuse pass as the input of the temporal\n        // pass of next frame\n        m_render_data.render_settings.restir_di_settings.restir_output_reservoirs = m_render_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs;\n    else if (m_render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n        // If there was a temporal reuse pass, using that output as the input of the next temporal reuse pass\n        m_render_data.render_settings.restir_di_settings.restir_output_reservoirs = m_render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs;\n    else\n        // No spatial or temporal, the output of ReSTIR is just the output of the initial candidates pass\n        m_render_data.render_settings.restir_di_settings.restir_output_reservoirs = m_render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs;\n}\n\nvoid CPURenderer::launch_ReSTIR_DI_temporal_reuse_pass()\n{\n    configure_ReSTIR_DI_temporal_pass();\n\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_DI_TemporalReuse(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::launch_ReSTIR_DI_spatial_reuse_pass(int spatial_reuse_pass_index)\n{\n    configure_ReSTIR_DI_spatial_pass(spatial_reuse_pass_index);\n\n    debug_render_pass([this](int x, int y) {\n        
ReSTIR_DI_SpatialReuse(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::launch_ReSTIR_DI_spatiotemporal_reuse_pass()\n{\n    configure_ReSTIR_DI_spatiotemporal_pass();\n\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_DI_SpatiotemporalReuse(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::tracing_pass()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n    debug_render_pass([this](int x, int y) {\n        MegaKernel(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::compute_ReSTIR_GI_optimal_spatial_reuse_radii()\n{\n    m_render_data.random_number = m_rng.xorshift32();\n\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_Directional_Reuse_Compute<true>(m_render_data, x, y,\n            m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u,\n            m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull,\n            m_render_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius);\n        });\n}\n\nvoid CPURenderer::configure_ReSTIR_GI_initial_candidates_pass()\n{\n    m_render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer = m_restir_gi_state.initial_candidates_reservoirs.data();\n\n    m_render_data.random_number = m_rng.xorshift32();\n}\n\nstatic unsigned int seed;\n\nvoid CPURenderer::launch_ReSTIR_GI_initial_candidates_pass()\n{\n    seed = m_render_data.random_number;\n\n    if (m_render_data.render_settings.nb_bounces > 0)\n    {\n        debug_render_pass([this](int x, int y) {\n            ReSTIR_GI_InitialCandidates(m_render_data, x, y);\n            });\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_GI_temporal_reuse_pass()\n{\n    if (m_render_data.render_settings.sample_number == 0)\n        // First frame, using the initial candidates as the input\n        
m_render_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs = m_render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer;\n    else\n        // Not the first frame, the input to the temporal pass is the output of the last frame ReSTIR\n        m_render_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs = m_render_data.render_settings.restir_gi_settings.restir_output_reservoirs;\n\n    // For the output, using whatever buffer isn't the one we're reading from (the input buffer)\n    if (m_render_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs == m_restir_gi_state.temporal_reservoirs.data())\n        m_render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs = m_restir_gi_state.spatial_reservoirs.data();\n    else\n        m_render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs = m_restir_gi_state.temporal_reservoirs.data();\n\n    m_render_data.random_number = m_rng.xorshift32();\n}\n\nvoid CPURenderer::launch_ReSTIR_GI_temporal_reuse_pass()\n{\n    if (m_render_data.render_settings.nb_bounces > 0 && m_render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n    {\n        debug_render_pass([this](int x, int y) {\n            ReSTIR_GI_TemporalReuse(m_render_data, x, y);\n            });\n    }\n}\n\nvoid CPURenderer::configure_ReSTIR_GI_spatial_reuse_pass(int spatial_pass_index)\n{\n    m_render_data.render_settings.restir_gi_settings.common_spatial_pass.spatial_pass_index = spatial_pass_index;\n\n    // The spatial reuse pass spatially reuse on the output of the temporal pass in the 'temporal buffer' and\n    // stores in the 'spatial buffer'\n\n    ReSTIRGIReservoir* input_reservoirs;\n    ReSTIRGIReservoir* output_reservoirs;\n\n    if (spatial_pass_index > 0)\n        // If this is the second spatial reuse pass or more, reading from the output of the previous pass\n        
input_reservoirs = m_render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs;\n    else\n    {\n        // This is the first spatial reuse pass, reading from the output of the temporal pass\n        // or the initial candidates depending on whether or not we have a temporal reuse pass at all\n\n        if (m_render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n            // and we have a temporal reuse pass so we're going to read from the temporal reservoirs\n            input_reservoirs = m_render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs;\n        else\n            // and we do not have a temporal reuse pass so we're just going to read from the initial candidates\n            input_reservoirs = m_restir_gi_state.initial_candidates_reservoirs.data();\n    }\n\n    // Outputting to whichever reservoir we're not reading from to avoid race conditions\n    if (input_reservoirs == m_restir_gi_state.temporal_reservoirs.data())\n        output_reservoirs = m_restir_gi_state.spatial_reservoirs.data();\n    else\n        output_reservoirs = m_restir_gi_state.temporal_reservoirs.data();\n\n    m_render_data.render_settings.restir_gi_settings.spatial_pass.input_reservoirs = input_reservoirs;\n    m_render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs = output_reservoirs;\n\n    m_render_data.random_number = m_rng.xorshift32();\n}\n\nvoid CPURenderer::launch_ReSTIR_GI_spatial_reuse_pass()\n{\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_GI_SpatialReuse(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::configure_ReSTIR_GI_shading_pass()\n{\n    if (m_render_data.render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass)\n        m_render_data.render_settings.restir_gi_settings.restir_output_reservoirs = m_render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs;\n    else if 
(m_render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n        m_render_data.render_settings.restir_gi_settings.restir_output_reservoirs = m_render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs;\n    else\n        m_render_data.render_settings.restir_gi_settings.restir_output_reservoirs = m_render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer;\n\n    m_render_data.random_number = seed;\n}\n\nvoid CPURenderer::launch_ReSTIR_GI_shading_pass()\n{\n    debug_render_pass([this](int x, int y) {\n        ReSTIR_GI_Shading(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::gmon_compute_median_of_means()\n{\n    debug_render_pass([this](int x, int y) {\n        GMoNComputeMedianOfMeans(m_render_data, x, y);\n        });\n}\n\nvoid CPURenderer::tonemap(float gamma, float exposure)\n{\n    ColorRGB32F* framebuffer_data = get_framebuffer().get_data_as_ColorRGB32F();\n\n#pragma omp parallel for schedule(dynamic)\n    for (int y = 0; y < m_resolution.y; y++)\n    {\n        for (int x = 0; x < m_resolution.x; x++)\n        {\n            int index = x + y * m_resolution.x;\n\n            ColorRGB32F hdr_color = framebuffer_data[index];\n\n            if (m_render_data.render_settings.accumulate)\n                // Scaling by sample count\n                hdr_color = hdr_color / float(m_render_data.render_settings.sample_number);\n\n            ColorRGB32F tone_mapped = ColorRGB32F(1.0f) - exp(-hdr_color * exposure);\n            tone_mapped = pow(tone_mapped, 1.0f / gamma);\n\n            framebuffer_data[index] = tone_mapped;\n        }\n    }\n}\n"
  },
  {
    "path": "src/Renderer/CPURenderer.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef CPU_RENDERER_H\n#define CPU_RENDERER_H\n\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/ReGIR/Settings.h\"\n#include \"Device/kernel_parameters/ReSTIR/DI/LightPresamplingParameters.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include \"Image/Image.h\"\n#include \"Image/EnvmapRGBE9995.h\"\n#include \"Renderer/BVH.h\"\n#include \"Renderer/CPUDataStructures/GBufferCPUData.h\"\n#include \"Renderer/CPUDataStructures/GMoNCPUData.h\"\n#include \"Renderer/CPUDataStructures/NEEPlusPlusCPUData.h\"\n#include \"Renderer/CPUDataStructures/MaterialPackedSoACPUData.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/PrecomputedEmissiveTrianglesDataSoAHost.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRHashGridSoAHost.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRHashCellDataSoAHost.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRPresampledLightsSoAHost.h\"\n#include \"Scene/SceneParser.h\"\n#include \"Utils/CommandlineArguments.h\"\n\n#include <functional>\n#include <memory>\n#include <vector>\n\nclass CPURenderer\n{\npublic:\n    CPURenderer(int width, int height);\n\n    void setup_brdfs_data();\n    void setup_nee_plus_plus();\n    void setup_gmon();\n    void nee_plus_plus_memcpy_accumulation(int frame_number);\n    void gmon_check_for_sets_accumulation();\n    void ReGIR_post_render_update();\n\n    void set_scene(Scene& parsed_scene);\n    void compute_emissives_power_alias_table(const Scene& scene);\n    void set_envmap(Image32Bit& envmap_image);\n    void set_camera(Camera& camera);\n\n    HIPRTRenderData& get_render_data();\n    HIPRTRenderSettings& get_render_settings();\n    Image32Bit& get_framebuffer();\n\n    void render();\n    void pre_render_update(int frame_number);\n    void post_sample_update(int frame_number);\n    void 
update_render_data(int sample);\n\n    void reset();\n\n    void debug_render_pass(std::function<void(int, int)> render_pass_function);\n\n    void nee_plus_plus_cache_visibility_pass();\n    void camera_rays_pass();\n    void ReGIR_pass();\n    void ReGIR_presample_lights();\n    void ReSTIR_DI_pass();\n    void ReSTIR_GI_pass();\n\n    template <bool accumulatePreIntegration>\n    void ReGIR_grid_fill_pass(bool primary_hit);\n    template <bool accumulatePreIntegration>\n    ReGIRHashGridSoADevice ReGIR_spatial_reuse_pass(bool primary_hit);\n    void ReGIR_pre_integration();\n\n    LightPresamplingParameters configure_ReSTIR_DI_light_presampling_pass();\n    void configure_ReSTIR_DI_initial_pass();\n\n    void launch_ReSTIR_DI_presampling_lights_pass();\n    void launch_ReSTIR_DI_initial_candidates_pass();\n\n    void compute_ReSTIR_DI_optimal_spatial_reuse_radii();\n    void configure_ReSTIR_DI_temporal_pass();\n    void configure_ReSTIR_DI_temporal_pass_for_fused_spatiotemporal();\n    void configure_ReSTIR_DI_spatial_pass(int spatial_pass_index);\n    void configure_ReSTIR_DI_spatial_pass_for_fused_spatiotemporal(int spatial_pass_index);\n    void configure_ReSTIR_DI_spatiotemporal_pass();\n\tvoid configure_ReSTIR_DI_output_buffer();\n\n    void launch_ReSTIR_DI_temporal_reuse_pass();\n    void launch_ReSTIR_DI_spatial_reuse_pass(int spatial_reuse_pass_index);\n    void launch_ReSTIR_DI_spatiotemporal_reuse_pass();\n\n    void tracing_pass();\n\n    void compute_ReSTIR_GI_optimal_spatial_reuse_radii();\n    void configure_ReSTIR_GI_initial_candidates_pass();\n    void configure_ReSTIR_GI_temporal_reuse_pass();\n    void configure_ReSTIR_GI_spatial_reuse_pass(int spatial_reuse_pass_index);\n    void configure_ReSTIR_GI_shading_pass();\n\n    void launch_ReSTIR_GI_initial_candidates_pass();\n    void launch_ReSTIR_GI_temporal_reuse_pass();\n    void launch_ReSTIR_GI_spatial_reuse_pass();\n    void launch_ReSTIR_GI_shading_pass();\n\n    void 
gmon_compute_median_of_means();\n\n    void tonemap(float gamma, float exposure);\n\nprivate:\n    int2 m_resolution;\n\n    Image32Bit m_framebuffer;\n    std::vector<unsigned char> m_pixel_active_buffer;\n    std::vector<ColorRGB32F> m_denoiser_albedo;\n    std::vector<float3> m_denoiser_normals;\n\n    std::vector<int> m_pixel_sample_count;\n    std::vector<int> m_pixel_converged_sample_count;\n    std::vector<float> m_pixel_squared_luminance;\n    unsigned char m_still_one_ray_active = true;\n    AtomicType<unsigned int> m_stop_noise_threshold_count;\n\n    RGBE9995Envmap<false> m_packed_envmap;\n    std::vector<float> m_envmap_cdf;\n    std::vector<float> m_envmap_alias_table_probas;\n    std::vector<int> m_envmap_alias_table_alias;\n\n    std::vector<float> m_power_alias_table_probas;\n    std::vector<int> m_power_alias_table_alias;\n    // This is a remnant of some tests and it was actually not worth it\n\tPrecomputedEmissiveTrianglesDataSoAHost<std::vector> m_precomputed_emissive_triangles_data;\n\n    NEEPlusPlusCPUData m_nee_plus_plus;\n\n    GMoNCPUData m_gmon;\n\n    DevicePackedTexturedMaterialSoACPUData m_gpu_packed_materials;\n    // Keeps track of which material is fully opaque or not\n    std::vector<unsigned char> m_material_opaque;\n\n    GBufferCPUData m_g_buffer;\n    GBufferCPUData m_g_buffer_prev_frame;\n\n    // Random number generator for given a random seed to the threads at each sample\n    Xorshift32Generator m_rng;\n\n    struct ReSTIRDIState\n    {\n        std::vector<ReSTIRDIReservoir> initial_candidates_reservoirs;\n        std::vector<ReSTIRDIReservoir> spatial_output_reservoirs_1;\n        std::vector<ReSTIRDIReservoir> spatial_output_reservoirs_2;\n        std::vector<ReSTIRDIPresampledLight> presampled_lights_buffer;\n\n        std::vector<unsigned int> per_pixel_spatial_reuse_directions_mask_u;\n        std::vector<unsigned long long int> per_pixel_spatial_reuse_directions_mask_ull;\n        std::vector<unsigned char> 
per_pixel_spatial_reuse_radius;\n\n        AtomicType<unsigned long long int> spatial_reuse_hit_rate_hits;\n        AtomicType<unsigned long long int> spatial_reuse_hit_rate_total;\n\n        ReSTIRDIReservoir* output_reservoirs = nullptr;\n\n        bool odd_frame = false;\n    } m_restir_di_state;\n\n    struct ReSTIRGIState\n    {\n        std::vector<ReSTIRGIReservoir> initial_candidates_reservoirs;\n        std::vector<ReSTIRGIReservoir> temporal_reservoirs;\n        std::vector<ReSTIRGIReservoir> spatial_reservoirs;\n\n        std::vector<unsigned int> per_pixel_spatial_reuse_directions_mask_u;\n        std::vector<unsigned long long int> per_pixel_spatial_reuse_directions_mask_ull;\n        std::vector<unsigned char> per_pixel_spatial_reuse_radius;\n\n        AtomicType<unsigned long long int> spatial_reuse_hit_rate_hits;\n        AtomicType<unsigned long long int> spatial_reuse_hit_rate_total;\n    } m_restir_gi_state;\n\n    struct ReGIRState\n    {\n        ReGIRPresampledLightsSoAHost<std::vector> presampled_lights;\n\n        ReGIRHashGridSoAHost<std::vector> grid_buffer_primary_hit;\n        ReGIRHashGridSoAHost<std::vector> spatial_grid_buffer_primary_hit;\n        ReGIRHashCellDataSoAHost<std::vector> hash_cell_data_primary_hit;\n\n        ReGIRHashGridSoAHost<std::vector> grid_buffer_secondary_hit;\n        ReGIRHashGridSoAHost<std::vector> spatial_grid_buffer_secondary_hit;\n        ReGIRHashCellDataSoAHost<std::vector> hash_cell_data_secondary_hit;\n\n        ReGIRHashGridSoAHost<std::vector> correlation_reduction_grid;\n\n        std::vector<AtomicType<float>> non_canonical_pre_integration_factors_primary_hit;\n        std::vector<AtomicType<float>> canonical_pre_integration_factors_primary_hit;\n\n        std::vector<AtomicType<float>> non_canonical_pre_integration_factors_secondary_hit;\n        std::vector<AtomicType<float>> canonical_pre_integration_factors_secondary_hit;\n\n        std::vector<AtomicType<unsigned int>> grid_cell_alive;\n     
   std::vector<unsigned int> grid_cells_alive_list;\n        AtomicType<unsigned int> grid_cells_alive_count;\n    } m_regir_state;\n\n    Image32Bit m_sheen_ltc_params;\n    Image32Bit m_GGX_conductor_directional_albedo;\n    Image32Bit3D m_glossy_dielectrics_directional_albedo;\n    Image32Bit3D m_GGX_glass_directional_albedo;\n    Image32Bit3D m_GGX_glass_inverse_directional_albedo;\n    Image32Bit3D m_GGX_thin_glass_directional_albedo;\n\n    std::vector<Triangle> m_triangle_buffer;\n    std::vector<Triangle> m_emissive_triangles_buffer;\n    std::shared_ptr<BVH> m_bvh;\n    std::shared_ptr<BVH> m_light_bvh;\n\n    Camera m_camera;\n    HIPRTRenderData m_render_data;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/DenoiserBuffersGPUData.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPUDataStructures/DenoiserBuffersGPUData.h\"\n#include \"Renderer/GPURenderer.h\"\n\nfloat3* DenoiserBuffersGPUData::map_normals_buffer()\n{\n\tif (use_interop_AOVs)\n\t\treturn m_normals_AOV_interop_buffer->map();\n\telse\n\t\treturn m_normals_AOV_no_interop_buffer->get_device_pointer();\n}\n\nvoid DenoiserBuffersGPUData::resize_normals_buffer(size_t new_element_count)\n{\n\tif (use_interop_AOVs)\n\t\tm_normals_AOV_interop_buffer->resize(new_element_count);\n\telse\n\t\tm_normals_AOV_no_interop_buffer->resize(new_element_count);\n}\n\nvoid DenoiserBuffersGPUData::unmap_normals_buffer()\n{\n\tif (use_interop_AOVs)\n\t\tm_normals_AOV_interop_buffer->unmap();\n}\n\nColorRGB32F* DenoiserBuffersGPUData::map_albedo_buffer()\n{\n\tif (use_interop_AOVs)\n\t\treturn m_albedo_AOV_interop_buffer->map();\n\telse\n\t\treturn m_albedo_AOV_no_interop_buffer->get_device_pointer();\n}\n\nvoid DenoiserBuffersGPUData::resize_albedo_buffer(size_t new_element_count)\n{\n\tif (use_interop_AOVs)\n\t\tm_albedo_AOV_interop_buffer->resize(new_element_count);\n\telse\n\t\tm_albedo_AOV_no_interop_buffer->resize(new_element_count);\n}\n\nvoid DenoiserBuffersGPUData::unmap_albedo_buffer()\n{\n\tif (use_interop_AOVs)\n\t\tm_albedo_AOV_interop_buffer->unmap();\n}\n\nvoid DenoiserBuffersGPUData::set_use_interop_AOV_buffers(GPURenderer* renderer, bool use_interop)\n{\n\tif (use_interop == use_interop_AOVs)\n\t\t// Nothing to change\n\t\treturn;\n\n\trenderer->synchronize_all_kernels();\n\n\tuse_interop_AOVs = use_interop;\n\n\tif 
(use_interop_AOVs)\n\t{\n\t\tm_normals_AOV_interop_buffer->resize(m_normals_AOV_no_interop_buffer->size());\n\t\tm_albedo_AOV_interop_buffer->resize(m_albedo_AOV_no_interop_buffer->size());\n\n\t\tm_normals_AOV_no_interop_buffer->free();\n\t\tm_albedo_AOV_no_interop_buffer->free();\n\t}\n\telse\n\t{\n\t\tm_normals_AOV_no_interop_buffer->resize(m_normals_AOV_interop_buffer->size());\n\t\tm_albedo_AOV_no_interop_buffer->resize(m_albedo_AOV_interop_buffer->size());\n\n\t\tm_normals_AOV_interop_buffer->free();\n\t\tm_albedo_AOV_interop_buffer->free();\n\t}\n}\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/DenoiserBuffersGPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_RENDERER_DENOISER_BUFFERS_H\n#define GPU_RENDERER_DENOISER_BUFFERS_H\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"OpenGL/OpenGLInteropBuffer.h\"\n#include \"UI/ApplicationSettings.h\"\n\nclass GPURenderer;\n\nstruct DenoiserBuffersGPUData\n{\n\tfloat3* map_normals_buffer();\n\tvoid resize_normals_buffer(size_t new_element_count);\n\tvoid unmap_normals_buffer();\n\n\tColorRGB32F* map_albedo_buffer();\n\tvoid resize_albedo_buffer(size_t new_element_count);\n\tvoid unmap_albedo_buffer();\n\n\tvoid set_use_interop_AOV_buffers(GPURenderer* renderer, bool use_interop);\n\n\t// Buffer for holding the denoised frame (the denoiser data will be copied\n\t\t// to this buffer and then displayed to the viewport)\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> m_denoised_framebuffer;\n\t// Normals G-buffer\n\tstd::shared_ptr<OpenGLInteropBuffer<float3>> m_normals_AOV_interop_buffer;\n\tstd::shared_ptr<OrochiBuffer<float3>> m_normals_AOV_no_interop_buffer;\n\t// Albedo G-buffer\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>>m_albedo_AOV_interop_buffer;\n\tstd::shared_ptr<OrochiBuffer<ColorRGB32F>>m_albedo_AOV_no_interop_buffer;\n\n\tbool use_interop_AOVs = ApplicationSettings::DENOISER_USE_INTEROP_BUFFERS_DEFAULT;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/GBufferGPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef G_BUFFER_GPU_RENDERER_H\n#define G_BUFFER_GPU_RENDERER_H\n\n#include \"Device/includes/GBufferDevice.h\"\n#include \"Device/includes/RayVolumeState.h\"\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n\n// GBuffer that stores information about the current frame first hit data\nstruct GBufferGPURenderer\n{\n\tvoid resize(unsigned int new_element_count, size_t ray_volume_state_byte_size)\n\t{\n\t\tmaterials.resize(new_element_count);\n\t\tgeometric_normals.resize(new_element_count);\n\t\tshading_normals.resize(new_element_count);\n\t\tprimary_hit_position.resize(new_element_count);\n\t\tfirst_hit_prim_index.resize(new_element_count);\n\n\t\t// We need to be careful here because the ray volume states contain the nested dielectric stack and the stack size can be changed at runtime through ImGui. However, on the CPU, the stack size is determined at compile time. Changing the stack size through ImGui only resizes the GPU shaders which then adapts to the new stack size thanks to the recompilation. However, on the CPU, we're not recompiling anything. 
This means that the stack size on the CPU doesn't match the stack size on the GPU anymore and the buffer will not be properly resized --> this is huge undefined behavior.\n\t\t// To avoid that, we're manually giving the size here for resizing\n\t\tray_volume_states.resize(new_element_count, ray_volume_state_byte_size);\n\t}\n\n\tvoid free()\n\t{\n\t\tmaterials.free();\n\t\tgeometric_normals.free();\n\t\tshading_normals.free();\n\t\tprimary_hit_position.free();\n\t\tfirst_hit_prim_index.free();\n\t\tray_volume_states.free();\n\t}\n\n\tGBufferDevice get_device_g_buffer()\n\t{\n\t\tGBufferDevice out;\n\n\t\tout.materials = materials.get_device_pointer();\n\t\tout.geometric_normals = geometric_normals.get_device_pointer();\n\t\tout.shading_normals = shading_normals.get_device_pointer();\n\t\tout.primary_hit_position = primary_hit_position.get_device_pointer();\n\t\tout.first_hit_prim_index = first_hit_prim_index.get_device_pointer();\n\n\t\treturn out;\n\t}\n\n\tOrochiBuffer<DevicePackedEffectiveMaterial> materials;\n\n\tOrochiBuffer<Octahedral24BitNormalPadded32b> shading_normals;\n\tOrochiBuffer<Octahedral24BitNormalPadded32b> geometric_normals;\n\tOrochiBuffer<float3> primary_hit_position;\n\tOrochiBuffer<int> first_hit_prim_index;\n\n\tOrochiBuffer<RayVolumeState> ray_volume_states;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/GMoNGPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_GMON_GPU_DATA_H\n#define RENDERER_GMON_GPU_DATA_H\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"OpenGL/OpenGLInteropBuffer.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/GMoNCPUGPUCommonData.h\"\n\n#include <memory>\n#include <vector>\n\n/**\n * CPU-side data structure for the implementation of GMoN on the GPU\n *\n * Reference:\n * [1] [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\n */\nstruct GMoNGPUData : public GMoNCPUGPUCommonData\n{\n\tGMoNGPUData()\n\t{\n\t\tresult_framebuffer = std::make_shared<OpenGLInteropBuffer<ColorRGB32F>>();\n\t}\n\n\tvoid resize_sets(unsigned int render_width, unsigned int render_height, unsigned int number_of_sets)\n\t{\n\t\tsets.resize(render_width * render_height * number_of_sets);\n\n\t\tcurrent_resolution = make_int2(render_width, render_height);\n\t\tcurrent_number_of_sets = number_of_sets;\n\t}\n\n\tvoid resize_interop(unsigned int new_width, unsigned int new_height)\n\t{\n\t\tresult_framebuffer->resize(new_width * new_height);\n\t}\n\n\tvoid free()\n\t{\n\t\tsets.free();\n\t\tresult_framebuffer->free();\n\n\t\tcurrent_resolution = make_int2(0, 0);\n\t}\n\n\tbool is_freed() const\n\t{\n\t\treturn sets.size() == 0 && result_framebuffer->size() == 0;\n\t}\n\n\tColorRGB32F* map_result_framebuffer()\n\t{\n\t\tif (using_gmon)\n\t\t\treturn result_framebuffer->map();\n\t\telse\n\t\t\treturn nullptr;\n\t}\n\n\tunsigned int get_VRAM_usage_bytes() const\n\t{\n\t\tunsigned int nb_pixels = current_resolution.x * current_resolution.y;\n\n\t\tunsigned int bytes_result_framebuffer = nb_pixels * sizeof(ColorRGB32F);\n\t\tunsigned int bytes_sets = nb_pixels * sizeof(ColorRGB32F) * current_number_of_sets;\n\n\t\treturn bytes_result_framebuffer + bytes_sets;\n\t}\n\n\t// This is one 
very big buffer that contains all the sets we accumulate into for GMoN\n\t//\n\t// For example, if GMoNMSets == 5 and a render resolution of 1280x720,\n\t// this is going to be a buffer that is 1280*720*5 elements long\n\tOrochiBuffer<ColorRGB32F> sets;\n\n\t// This is the buffer that contains the G-median of means result of each pixel and this is going\n\t// to be displayed in the viewport instead of the regular framebuffer if GMoN is being used\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> result_framebuffer = nullptr;\n\n\t// These two variables are used for lazy GMoN recomputation:\n\t// \n\t// GMoN isn't recomputed at each sample because we need at least one new sample\n\t// in each set of GMoN to be able to recompute the median of means.\n\t// So we should recompute GMoN only every M samples (for M GMoN sets).\n\t//\n\t// Even then, that's not exactly what we're doing because recomputing GMoN\n\t// is a little bit expensive but the viewport of the render window is only\n\t// refreshed every 5s (the timer varies) so this means that we only need to\n\t// recompute GMoN every 5s, not every M samples\n\t//\n\t// GMoNRenderPass:request_refresh() sets 'm_gmon_recomputation_requested' to true.\n\t// If \n\tbool m_gmon_recomputed = false;\n\tbool m_gmon_recomputation_requested = false;\n\n\t// How many samples were we at when last launched the GMoN kernel\n\tunsigned int last_recomputed_sample_count = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/MaterialPackedSoAGPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_GPU_DATA_H\n#define HOST_DEVICE_COMMON_MATERIAL_PACKED_SOA_GPU_DATA_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"HostDeviceCommon/Material/MaterialPacked.h\"\n\n#define DECLARE_ALL_MEMBERS_STD_TIE                                     \\\n  auto all_members = std::tie(                                          \\\n      normal_map_emission_index, base_color_roughness_metallic_index,   \\\n      roughness_and_metallic_index, anisotropic_specular_index,         \\\n      coat_sheen_index, specular_transmission_index,                    \\\n                                                                        \\\n      flags,                                                            \\\n                                                                        \\\n      emission,                                                         \\\n                                                                        \\\n      base_color_roughness,                                             \\\n                                                                        \\\n      oren_nayar_sigma,                                                 \\\n                                                                        \\\n      metallic_F90_and_metallic, metallic_F82_packed_and_diffuse_transmission,\\\n      metallic_F90_falloff_exponent,                                    \\\n      anisotropy_and_rotation_and_second_roughness,                     \\\n                                                                        \\\n      specular_color_and_tint_factor,                                   \\\n      specular_and_darkening_and_coat_roughness, coat_medium_thickness, \\\n      coat_and_medium_absorption,                                       \\\n      
coat_roughening_darkening_anisotropy_and_rotation, coat_ior,      \\\n                                                                        \\\n      sheen_and_color,                                                  \\\n                                                                        \\\n      ior, absorption_color_packed, absorption_at_distance,             \\\n                                                                        \\\n      sheen_roughness_transmission_dispersion_thin_film,                \\\n                                                                        \\\n      dispersion_abbe_number, thin_film_ior, thin_film_thickness,       \\\n      thin_film_kappa_3, thin_film_base_ior_override,                   \\\n      alpha_thin_film_hue_dielectric_priority);\n\n/**\n * These two structures here are just there to hold all the buffers created on the GPU\n * \n * The device pointers of these buffers are then set on to the RenderData of the GPU\n * \n * For a documentation of what's packed into the members ('specular_and_darkening_and_coat_roughness' for example),\n * see the 'DevicePackedEffectiveMaterialSoA' class\n */\n\nstruct DevicePackedEffectiveMaterialSoAGPUData\n{\n    OrochiBuffer<UChar8BoolsPacked> flags;\n\n    OrochiBuffer<ColorRGB32F> emission;\n\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> base_color_roughness;\n\n    OrochiBuffer<float> oren_nayar_sigma;\n \n    OrochiBuffer<ColorRGB24bFloat0_1Packed> metallic_F90_and_metallic;\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> metallic_F82_packed_and_diffuse_transmission;\n    OrochiBuffer<float> metallic_F90_falloff_exponent;\n    OrochiBuffer<Float4xPacked> anisotropy_and_rotation_and_second_roughness;\n\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> specular_color_and_tint_factor;\n    OrochiBuffer<Float4xPacked> specular_and_darkening_and_coat_roughness;\n    OrochiBuffer<float> coat_medium_thickness;\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> 
coat_and_medium_absorption;\n    OrochiBuffer<Float4xPacked> coat_roughening_darkening_anisotropy_and_rotation;\n    OrochiBuffer<float> coat_ior;\n\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> sheen_and_color;\n\n    OrochiBuffer<float> ior;\n    OrochiBuffer<ColorRGB24bFloat0_1Packed> absorption_color_packed;\n    OrochiBuffer<float> absorption_at_distance;\n\n    OrochiBuffer<Float4xPacked> sheen_roughness_transmission_dispersion_thin_film;\n\n    OrochiBuffer<float> dispersion_abbe_number;\n    OrochiBuffer<float> thin_film_ior;\n    OrochiBuffer<float> thin_film_thickness;\n    OrochiBuffer<float> thin_film_kappa_3;\n    OrochiBuffer<float> thin_film_base_ior_override;\n    OrochiBuffer<Float2xUChar2xPacked> alpha_thin_film_hue_dielectric_priority;\n};\n\nstruct DevicePackedTexturedMaterialSoAGPUData : public DevicePackedEffectiveMaterialSoAGPUData\n{\n    OrochiBuffer<Uint2xPacked> normal_map_emission_index;\n    OrochiBuffer<Uint2xPacked> base_color_roughness_metallic_index;\n    OrochiBuffer<Uint2xPacked> roughness_and_metallic_index;\n    OrochiBuffer<Uint2xPacked> anisotropic_specular_index;\n    OrochiBuffer<Uint2xPacked> coat_sheen_index;\n    OrochiBuffer<Uint2xPacked> specular_transmission_index;\n\n    // Resize function using the generic for_each_member\n    void resize(size_t new_element_count)\n    {\n        m_element_count = new_element_count;\n\n        // This declares a std::tie of all the buffers\n        DECLARE_ALL_MEMBERS_STD_TIE;\n\n        // Function that will be applied to all the buffers to resize them\n        auto resize_lambda_function = [new_element_count](auto& buffer) { buffer.resize(new_element_count); };\n\n        // Applying the resize function to all the buffers\n        std::apply([&](auto&... 
args) { (resize_lambda_function(args), ...); }, all_members);\n    }\n\n    void upload_data(std::vector<DevicePackedTexturedMaterial>& gpu_packed_materials)\n    {\n        upload_data_partial(0, gpu_packed_materials.data(), gpu_packed_materials.size());\n    }\n\n    void upload_data_partial(unsigned int start_index, const DevicePackedTexturedMaterial* data, size_t element_count)\n    {\n        // Textured part\n        normal_map_emission_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, normal_map_emission_index), element_count).data(), element_count);\n        base_color_roughness_metallic_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, base_color_roughness_metallic_index), element_count).data(), element_count);\n        roughness_and_metallic_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, roughness_and_metallic_index), element_count).data(), element_count);\n        anisotropic_specular_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, anisotropic_specular_index), element_count).data(), element_count);\n        coat_sheen_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, coat_sheen_index), element_count).data(), element_count);\n        specular_transmission_index.upload_data_partial(start_index, expand_from_gpu_packed_materials<Uint2xPacked>(data, offsetof(DevicePackedTexturedMaterial, specular_transmission_index), element_count).data(), element_count);\n\n        // Non textured parameters\n        flags.upload_data_partial(start_index, expand_from_gpu_packed_materials<UChar8BoolsPacked>(data, offsetof(DevicePackedTexturedMaterial, flags), 
element_count).data(), element_count);\n\n        emission.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB32F>(data, offsetof(DevicePackedTexturedMaterial, emission), element_count).data(), element_count);\n\n        base_color_roughness.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, base_color_roughness), element_count).data(), element_count);\n\n        oren_nayar_sigma.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, oren_nayar_sigma), element_count).data(), element_count);\n\n        metallic_F90_and_metallic.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, metallic_F90_and_metallic), element_count).data(), element_count);\n        metallic_F82_packed_and_diffuse_transmission.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, metallic_F82_packed_and_diffuse_transmission), element_count).data(), element_count);\n        metallic_F90_falloff_exponent.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, metallic_F90_falloff_exponent), element_count).data(), element_count);\n        anisotropy_and_rotation_and_second_roughness.upload_data_partial(start_index, expand_from_gpu_packed_materials<Float4xPacked>(data, offsetof(DevicePackedTexturedMaterial, anisotropy_and_rotation_and_second_roughness), element_count).data(), element_count);\n\n        specular_color_and_tint_factor.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, specular_color_and_tint_factor), element_count).data(), element_count);\n        
specular_and_darkening_and_coat_roughness.upload_data_partial(start_index, expand_from_gpu_packed_materials<Float4xPacked>(data, offsetof(DevicePackedTexturedMaterial, specular_and_darkening_and_coat_roughness), element_count).data(), element_count);\n        coat_medium_thickness.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, coat_medium_thickness), element_count).data(), element_count);\n        coat_and_medium_absorption.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, coat_and_medium_absorption), element_count).data(), element_count);\n        coat_roughening_darkening_anisotropy_and_rotation.upload_data_partial(start_index, expand_from_gpu_packed_materials<Float4xPacked>(data, offsetof(DevicePackedTexturedMaterial, coat_roughening_darkening_anisotropy_and_rotation), element_count).data(), element_count);\n        coat_ior.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, coat_ior), element_count).data(), element_count);\n\n        sheen_and_color.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, sheen_and_color), element_count).data(), element_count);\n\n        ior.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, ior), element_count).data(), element_count);\n        absorption_color_packed.upload_data_partial(start_index, expand_from_gpu_packed_materials<ColorRGB24bFloat0_1Packed>(data, offsetof(DevicePackedTexturedMaterial, absorption_color_packed), element_count).data(), element_count);\n        absorption_at_distance.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, absorption_at_distance), 
element_count).data(), element_count);\n\n        sheen_roughness_transmission_dispersion_thin_film.upload_data_partial(start_index, expand_from_gpu_packed_materials<Float4xPacked>(data, offsetof(DevicePackedTexturedMaterial, sheen_roughness_transmission_dispersion_thin_film), element_count).data(), element_count);\n\n        dispersion_abbe_number.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, dispersion_abbe_number), element_count).data(), element_count);\n        thin_film_ior.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, thin_film_ior), element_count).data(), element_count);\n        thin_film_thickness.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, thin_film_thickness), element_count).data(), element_count);\n        thin_film_kappa_3.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, thin_film_kappa_3), element_count).data(), element_count);\n        thin_film_base_ior_override.upload_data_partial(start_index, expand_from_gpu_packed_materials<float>(data, offsetof(DevicePackedTexturedMaterial, thin_film_base_ior_override), element_count).data(), element_count);\n        alpha_thin_film_hue_dielectric_priority.upload_data_partial(start_index, expand_from_gpu_packed_materials<Float2xUChar2xPacked>(data, offsetof(DevicePackedTexturedMaterial, alpha_thin_film_hue_dielectric_priority), element_count).data(), element_count);\n    }\n\n    DevicePackedTexturedMaterialSoA get_device_SoA_struct()\n    {\n        DevicePackedTexturedMaterialSoA out;\n\n        out.normal_map_emission_index = normal_map_emission_index.get_device_pointer();\n        out.base_color_roughness_metallic_index = base_color_roughness_metallic_index.get_device_pointer();\n        out.roughness_and_metallic_index = 
roughness_and_metallic_index.get_device_pointer();\n        out.anisotropic_specular_index = anisotropic_specular_index.get_device_pointer();\n        out.coat_sheen_index = coat_sheen_index.get_device_pointer();\n        out.specular_transmission_index = specular_transmission_index.get_device_pointer();\n\n        out.flags = flags.get_device_pointer();\n\n        out.emission = emission.get_device_pointer();\n\n        out.base_color_roughness = base_color_roughness.get_device_pointer();\n\n        out.oren_nayar_sigma = oren_nayar_sigma.get_device_pointer();\n\n        out.metallic_F90_and_metallic = metallic_F90_and_metallic.get_device_pointer();\n        out.metallic_F82_packed_and_diffuse_transmission = metallic_F82_packed_and_diffuse_transmission.get_device_pointer();\n        out.metallic_F90_falloff_exponent = metallic_F90_falloff_exponent.get_device_pointer();\n        out.anisotropy_and_rotation_and_second_roughness = anisotropy_and_rotation_and_second_roughness.get_device_pointer();\n\n        out.specular_color_and_tint_factor = specular_color_and_tint_factor.get_device_pointer();\n        out.specular_and_darkening_and_coat_roughness = specular_and_darkening_and_coat_roughness.get_device_pointer();\n        out.coat_medium_thickness = coat_medium_thickness.get_device_pointer();\n        out.coat_and_medium_absorption = coat_and_medium_absorption.get_device_pointer();\n        out.coat_roughening_darkening_anisotropy_and_rotation = coat_roughening_darkening_anisotropy_and_rotation.get_device_pointer();\n        out.coat_ior = coat_ior.get_device_pointer();\n\n        out.sheen_and_color = sheen_and_color.get_device_pointer();\n\n        out.ior = ior.get_device_pointer();\n        out.absorption_color_packed = absorption_color_packed.get_device_pointer();\n        out.absorption_at_distance = absorption_at_distance.get_device_pointer();\n\n        out.sheen_roughness_transmission_dispersion_thin_film = 
sheen_roughness_transmission_dispersion_thin_film.get_device_pointer();\n\n        out.dispersion_abbe_number = dispersion_abbe_number.get_device_pointer();\n        out.thin_film_ior = thin_film_ior.get_device_pointer();\n        out.thin_film_thickness = thin_film_thickness.get_device_pointer();\n        out.thin_film_kappa_3 = thin_film_kappa_3.get_device_pointer();\n        out.thin_film_base_ior_override = thin_film_base_ior_override.get_device_pointer();\n        out.alpha_thin_film_hue_dielectric_priority = alpha_thin_film_hue_dielectric_priority.get_device_pointer();\n\n        return out;\n    }\n\n    size_t m_element_count = 0;\n\nprivate:\n    /**\n     * Takes a pointer to some 'DevicePackedTexturedMaterial' in the 'gpu_packed_materials' array (which could be std::vector().data() for example) \n     * and returns a vector of type T that contains 'element_count' elements at offset 'offset' of the 'DevicePackedTexturedMaterial' structure\n     * \n     * For example:\n     * expand_from_gpu_packed_materials<Uint2xPacked>(gpu_packed_materials + 3, offsetof(DevicePackedTexturedMaterial, normal_map_emission_index), 2)\n     * \n     * returns an std::vector that contains the 'normal_map_emission_index' of gpu_packed_materials[3] and gpu_packed_materials[4]\n     */\n    template <typename T>\n    std::vector<T> expand_from_gpu_packed_materials(const DevicePackedTexturedMaterial* gpu_packed_materials, size_t offset_in_struct, size_t element_count)\n    {\n        std::vector<T> out(element_count);\n\n        for (int i = 0; i < element_count; i++)\n            out[i] = *reinterpret_cast<const T*>(reinterpret_cast<const char*>(&gpu_packed_materials[i]) + offset_in_struct);\n\n        return out;\n    }\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPUDataStructures/StatusBuffersGPUData.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n\nstruct StatusBuffersGPUData\n{\n\t// A single boolean to indicate whether there is still a ray active in\n\t// the kernel or not. Mostly useful when adaptive sampling is on and we\n\t// want to know if all pixels have converged or not yet\n\tOrochiBuffer<unsigned char> still_one_ray_active_buffer;\n\t// How many pixels have reached the render_settings.stop_pixel_noise_threshold.\n\t// Warning: This buffer does not count how many pixels have converged according to\n\t// the adaptive sampling noise threshold. This is only for the stop_pixel_noise_threshold\n\tOrochiBuffer<unsigned int> pixels_converged_count_buffer;\n};"
  },
  {
    "path": "src/Renderer/GPURenderer.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"Device/includes/BSDFs/SheenLTCFittedParameters.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n#include \"Renderer/Baker/GPUBaker.h\"\n#include \"Renderer/Baker/GPUBakerConstants.h\"\n#include \"Renderer/GPURenderer.h\"\n#include \"RenderPasses/FillGBufferRenderPass.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"UI/RenderWindow.h\"\n\n#include <Orochi/OrochiUtils.h>\n\n#include <condition_variable>\n\n// List of partials_options that will be specific to each kernel. We don't want these partials_options\n// to be synchronized between kernels\nconst std::unordered_set<std::string> GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED =\n{\n\tGPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL,\n\tGPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE,\n};\n\nconst std::string GPURenderer::ALL_RENDER_PASSES_TIME_KEY = \"FullFrameTime\";\nconst std::string GPURenderer::FULL_FRAME_TIME_WITH_CPU_KEY = \"FullFrameTimeWithCPU\";\nconst std::string GPURenderer::DEBUG_KERNEL_TIME_KEY = \"DebugKernelTime\";\n\nGPURenderer::GPURenderer(RenderWindow* render_window, std::shared_ptr<HIPRTOrochiCtx> hiprt_oro_ctx, std::shared_ptr<ApplicationSettings> application_settings)\n{\n\t// Creating buffers\n\tm_framebuffer = std::make_shared<OpenGLInteropBuffer<ColorRGB32F>>();\n\tm_denoiser_buffers.m_denoised_framebuffer = std::make_shared<OpenGLInteropBuffer<ColorRGB32F>>();\n\tm_denoiser_buffers.m_normals_AOV_interop_buffer = std::make_shared<OpenGLInteropBuffer<float3>>();\n\tm_denoiser_buffers.m_normals_AOV_no_interop_buffer = std::make_shared<OrochiBuffer<float3>>();\n\tm_denoiser_buffers.m_albedo_AOV_interop_buffer = 
std::make_shared<OpenGLInteropBuffer<ColorRGB32F>>();\n\tm_denoiser_buffers.m_albedo_AOV_no_interop_buffer = std::make_shared<OrochiBuffer<ColorRGB32F>>();\n\tm_pixels_converged_sample_count_buffer = std::make_shared<OrochiBuffer<int>>();\n\n\tm_DEBUG_SUMS.resize(1024);\n\tm_DEBUG_SUM_COUNT.resize(1024);\n\n\tm_hiprt_orochi_ctx = hiprt_oro_ctx;\t\n\tm_global_compiler_options = std::make_shared<GPUKernelCompilerOptions>();\n\t// Adding hardware acceleration by default if supported\n\tm_global_compiler_options->set_macro_value(\"__USE_HWI__\", device_supports_hardware_acceleration() == HardwareAccelerationSupport::SUPPORTED);\n\t// Just \"fixing\" the ReGIR options to be in sync with the UI\n\tif (m_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR &&\n\t\t(m_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_ONE_LIGHT || \n\t\tm_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_MIS_LIGHT_BSDF))\n\t\tm_global_compiler_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RIS_BSDF_AND_LIGHT);\n\n\tm_render_thread.init(this);\n\tm_device_properties = m_hiprt_orochi_ctx->device_properties;\n\tm_application_settings = application_settings;\n\n\tsetup_brdfs_data();\n\tsetup_filter_functions();\n\tm_render_thread.setup_render_passes(render_window);\n\n\tOROCHI_CHECK_ERROR(oroStreamCreate(&m_main_stream));\n\n\t// Buffer that keeps track of whether at least one ray is still alive or not\n\tm_status_buffers.still_one_ray_active_buffer.resize(1);\n\tm_status_buffers.still_one_ray_active_buffer.memset_whole_buffer(1);\n\tm_status_buffers.pixels_converged_count_buffer.resize(1);\n}\n\nGPURenderer::~GPURenderer()\n{\n\tm_render_thread.request_exit();\n}\n\nvoid GPURenderer::start_render_thread()\n{\n\tm_render_thread.start();\n}\n\nvoid 
GPURenderer::setup_brdfs_data()\n{\n\tinit_sheen_ltc_texture();\n\n\tload_GGX_energy_compensation_textures();\n\tload_glossy_dielectric_energy_compensation_textures();\n\tload_GGX_glass_energy_compensation_textures();\n}\n\nvoid GPURenderer::init_sheen_ltc_texture()\n{\n\t// CUDA/HIP do not handle 3 channels textures so we're padding it to 4 channels\n\tstd::vector<float> padded_ltc(32 * 32 * 4);\n\n\tfor (int y = 0; y < 32; y++)\n\t{\n\t\tfor (int x = 0; x < 32; x++)\n\t\t{\n\t\t\tint padded_index = (y * 32 + x) * 4;\n\t\t\tint non_padded_index = y * 32 + x;\n\n\t\t\tpadded_ltc[padded_index + 0] = ltc_parameters_table_approximation[non_padded_index].x;\n\t\t\tpadded_ltc[padded_index + 1] = ltc_parameters_table_approximation[non_padded_index].y;\n\t\t\tpadded_ltc[padded_index + 2] = ltc_parameters_table_approximation[non_padded_index].z;\n\t\t\tpadded_ltc[padded_index + 3] = 0.0f;\n\t\t}\n\t}\n\n\tImage32Bit sheen_ltc_params_image(padded_ltc.data(), 32, 32, 4);\n\tm_sheen_ltc_params = OrochiTexture(sheen_ltc_params_image, hipFilterModeLinear, hipAddressModeClamp);\n}\n\nvoid GPURenderer::load_GGX_energy_compensation_textures(hipTextureFilterMode filtering_mode)\n{\n\tImage32Bit GGXEss_image = Image32Bit::read_image_hdr(BRDFS_DATA_DIRECTORY \"/GGX/\" + GPUBakerConstants::get_GGX_conductor_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing), 1, true);\n\tm_GGX_conductor_directional_albedo = OrochiTexture(GGXEss_image, filtering_mode, hipAddressModeClamp);\n\n\tm_render_data_buffers_invalidated = true;\n}\n\nvoid GPURenderer::load_glossy_dielectric_energy_compensation_textures(hipTextureFilterMode filtering_mode)\n{\n\tsynchronize_all_kernels();\n\n\tstd::vector<Image32Bit> images(GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR);\n\tfor (int i = 0; i < GPUBakerConstants::GLOSSY_DIELECTRIC_TEXTURE_SIZE_IOR; i++)\n\t{\n\t\tstd::string filename = std::to_string(i) + 
GPUBakerConstants::get_glossy_dielectric_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n\t\tstd::string filepath = BRDFS_DATA_DIRECTORY \"/GlossyDielectrics/\" + filename;\n\t\timages[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n\t}\n\tm_glossy_dielectric_directional_albedo = OrochiTexture3D(images, filtering_mode == hipFilterModeLinear ? ORO_TR_FILTER_MODE_LINEAR : ORO_TR_FILTER_MODE_POINT, ORO_TR_ADDRESS_MODE_CLAMP);\n\n\tm_render_data_buffers_invalidated = true;\n}\n\nvoid GPURenderer::load_GGX_glass_energy_compensation_textures(hipTextureFilterMode filtering_mode)\n{\n\tsynchronize_all_kernels();\n\n\tstd::vector<Image32Bit> images(GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n\tfor (int i = 0; i < GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n\t{\n\t\tstd::string filename = std::to_string(i) + GPUBakerConstants::get_GGX_glass_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n\t\tstd::string filepath = BRDFS_DATA_DIRECTORY \"/GGX/Glass/\" + filename;\n\t\timages[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n\t}\n\tm_GGX_glass_directional_albedo = OrochiTexture3D(images, filtering_mode == hipFilterModeLinear ? ORO_TR_FILTER_MODE_LINEAR : ORO_TR_FILTER_MODE_POINT, ORO_TR_ADDRESS_MODE_CLAMP);\n\n\tfor (int i = 0; i < GPUBakerConstants::GGX_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n\t{\n\t\tstd::string filename = std::to_string(i) + GPUBakerConstants::get_GGX_glass_directional_albedo_inv_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n\t\tstd::string filepath = BRDFS_DATA_DIRECTORY \"/GGX/Glass/\" + filename;\n\t\timages[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n\t}\n\tm_GGX_glass_inverse_directional_albedo = OrochiTexture3D(images, filtering_mode == hipFilterModeLinear ? 
ORO_TR_FILTER_MODE_LINEAR : ORO_TR_FILTER_MODE_POINT, ORO_TR_ADDRESS_MODE_CLAMP);\n\n\timages.resize(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR);\n\tfor (int i = 0; i < GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR; i++)\n\t{\n\t\tstd::string filename = std::to_string(i) + GPUBakerConstants::get_GGX_thin_glass_directional_albedo_texture_filename(m_render_data.bsdfs_data.GGX_masking_shadowing);\n\t\tstd::string filepath = BRDFS_DATA_DIRECTORY \"/GGX/Glass/\" + filename;\n\t\timages[i] = Image32Bit::read_image_hdr(filepath, 1, true);\n\t}\n\tm_GGX_thin_glass_directional_albedo = OrochiTexture3D(images, filtering_mode == hipFilterModeLinear ? ORO_TR_FILTER_MODE_LINEAR : ORO_TR_FILTER_MODE_POINT, ORO_TR_ADDRESS_MODE_CLAMP);\n\n\tm_render_data_buffers_invalidated = true;\n}\n\nvoid GPURenderer::compute_emissives_power_alias_table(const Scene& scene)\n{\n\tcompute_emissives_power_alias_table(\n\t\tscene.emissive_triangles_primitive_indices,\n\t\tscene.vertices_positions, \n\t\tscene.triangles_vertex_indices,\n\t\tscene.material_indices,\n\t\tscene.materials,\n\t\t\n\t\tm_hiprt_scene.emissive_power_alias_table_probas, \n\t\tm_hiprt_scene.emissive_power_alias_table_alias,\n\t\tm_render_data.buffers.emissives_power_alias_table);\n\n\t// Not joining the thread that does the computation here because it will\n\t// be joined before starting the render since this method is called during\n\t// the initialization of the renderer\n}\n\nvoid GPURenderer::recompute_emissives_power_alias_table()\n{\n\tsynchronize_all_kernels();\n\n\tif (!needs_emissives_power_alias_table())\n\t{\n\t\tfree_emissives_power_alias_table();\n\n\t\treturn;\n\t}\n\n\tstd::vector<int> emissive_triangle_indices = m_hiprt_scene.emissive_triangles_primitive_indices.download_data();\n\tstd::vector<float3> vertices_positions = m_hiprt_scene.whole_scene_BLAS.download_vertices_positions();\n\tstd::vector<int> triangles_indices = 
m_hiprt_scene.whole_scene_BLAS.download_triangle_indices();\n\tstd::vector<int> material_indices = m_hiprt_scene.material_indices.download_data();\n\n\tcompute_emissives_power_alias_table(\n\t\temissive_triangle_indices,\n\t\tvertices_positions,\n\t\ttriangles_indices,\n\t\tmaterial_indices,\n\t\tm_current_materials,\n\n\t\tm_hiprt_scene.emissive_power_alias_table_probas,\n\t\tm_hiprt_scene.emissive_power_alias_table_alias,\n\t\tm_render_data.buffers.emissives_power_alias_table);\n\n\tThreadManager::join_threads(ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE);\n}\n\nvoid GPURenderer::compute_emissives_power_alias_table(\n\tconst std::vector<int>& emissive_triangle_indices,\n\tconst std::vector<float3>& vertices_positions,\n\tconst std::vector<int>& triangles_indices,\n\tconst std::vector<int>& material_indices,\n\tconst std::vector<CPUMaterial>& materials,\n\n\tOrochiBuffer<float>& alias_table_probas_buffer,\n\tOrochiBuffer<int>& alias_table_alias_buffer,\n\tDeviceAliasTable& power_alias_table)\n{\n\tThreadManager::add_dependency(ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE, ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES);\n\tThreadManager::start_thread(ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE, [\n\t\tthis,\n\t\t&emissive_triangle_indices, \n\t\t&vertices_positions,\n\t\t&triangles_indices, \n\t\t&material_indices,\n\t\t&materials,\n\n\t\t&alias_table_alias_buffer,\n\t\t&alias_table_probas_buffer,\n\t\t&power_alias_table] ()\n\t{\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\tif (!needs_emissives_power_alias_table())\n\t\t\treturn;\n\t\telse if (emissive_triangle_indices.size() == 0)\n\t\t\treturn;\n\n\t\tstd::vector<float> power_list(emissive_triangle_indices.size());\n\t\tfloat power_sum = 0.0f;\n\n\t\tfor (int i = 0; i < emissive_triangle_indices.size(); i++)\n\t\t{\n\t\t\tint emissive_triangle_index = emissive_triangle_indices[i];\n\n\t\t\t// Computing the area of the 
triangle\n\t\t\tfloat3 vertex_A = vertices_positions[triangles_indices[emissive_triangle_index * 3 + 0]];\n\t\t\tfloat3 vertex_B = vertices_positions[triangles_indices[emissive_triangle_index * 3 + 1]];\n\t\t\tfloat3 vertex_C = vertices_positions[triangles_indices[emissive_triangle_index * 3 + 2]];\n\n\t\t\tfloat3 AB = vertex_B - vertex_A;\n\t\t\tfloat3 AC = vertex_C - vertex_A;\n\n\t\t\tfloat3 normal = hippt::cross(AB, AC);\n\t\t\tfloat length_normal = hippt::length(normal);\n\t\t\tfloat triangle_area = 0.5f * length_normal;\n\n\t\t\tint mat_index = material_indices[emissive_triangle_index];\n\t\t\tfloat emission_luminance = materials[mat_index].emission.luminance() * materials[mat_index].emission_strength * materials[mat_index].global_emissive_factor;\n\n\t\t\tfloat area_power = emission_luminance * triangle_area;\n\n\t\t\tpower_list[i] = area_power;\n\t\t\tpower_sum += area_power;\n\t\t}\n\n\t\tstd::vector<float> alias_probas;\n\t\tstd::vector<int> alias_aliases;\n\t\tUtils::compute_alias_table(power_list, power_sum, alias_probas, alias_aliases);\n\n\t\talias_table_probas_buffer.resize(emissive_triangle_indices.size());\n\t\talias_table_alias_buffer.resize(emissive_triangle_indices.size());\n\n\t\talias_table_probas_buffer.upload_data(alias_probas);\n\t\talias_table_alias_buffer.upload_data(alias_aliases);\n\n\t\tpower_alias_table.alias_table_probas = alias_table_probas_buffer.get_device_pointer();\n\t\tpower_alias_table.alias_table_alias = alias_table_alias_buffer.get_device_pointer();\n\t\tpower_alias_table.size = emissive_triangle_indices.size();\n\t\tpower_alias_table.sum_elements = power_sum;\n\t});\n}\n\nvoid GPURenderer::free_emissives_power_alias_table()\n{\n\tif (m_hiprt_scene.emissive_power_alias_table_alias.size() > 0)\n\t\tm_hiprt_scene.emissive_power_alias_table_alias.free();\n\n\tif (m_hiprt_scene.emissive_power_alias_table_probas.size() > 
0)\n\t\tm_hiprt_scene.emissive_power_alias_table_probas.free();\n\n\tm_render_data.buffers.emissives_power_alias_table.alias_table_alias = nullptr;\n\tm_render_data.buffers.emissives_power_alias_table.alias_table_probas = nullptr;\n\tm_render_data.buffers.emissives_power_alias_table.size = 0;\n\tm_render_data.buffers.emissives_power_alias_table.sum_elements = 0;\n}\n\nbool GPURenderer::needs_emissives_power_alias_table()\n{\n\tbool directly_using_power = m_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_POWER;\n\tbool using_regir_power =\n\t\tm_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR &&\n\t\tm_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_POWER;\n\tbool restir_di_presampling_using_power_sampling = \n\t\tm_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_RESTIR_DI &&\n\t\tm_global_compiler_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY) == LSS_BASE_POWER;\n\n\treturn directly_using_power || using_regir_power || restir_di_presampling_using_power_sampling;\n}\n\nstd::shared_ptr<GMoNRenderPass> GPURenderer::get_gmon_render_pass()\n{\n\treturn m_render_thread.get_gmon_render_pass();\n}\n\nstd::shared_ptr<NEEPlusPlusRenderPass> GPURenderer::get_NEE_plus_plus_render_pass()\n{\n\treturn m_render_thread.get_NEE_plus_plus_render_pass();\n}\n\nstd::shared_ptr<ReGIRRenderPass> GPURenderer::get_ReGIR_render_pass()\n{\n\treturn m_render_thread.get_ReGIR_render_pass();\n}\n\nstd::shared_ptr<ReSTIRDIRenderPass> GPURenderer::get_ReSTIR_DI_render_pass()\n{\n\treturn m_render_thread.get_ReSTIR_DI_render_pass();\n}\n\nstd::shared_ptr<ReSTIRGIRenderPass> GPURenderer::get_ReSTIR_GI_render_pass()\n{\n\treturn 
m_render_thread.get_ReSTIR_GI_render_pass();\n}\n\nNEEPlusPlusHashGridStorage& GPURenderer::get_nee_plus_plus_storage()\n{\n\treturn get_NEE_plus_plus_render_pass()->get_nee_plus_plus_storage();\n}\n\nvoid GPURenderer::setup_filter_functions()\n{\n\t// Function called on intersections for handling alpha testing\n\thiprtFuncNameSet alpha_testing_func_set = { nullptr, \"filter_function\" };\n\tm_func_name_sets.push_back(alpha_testing_func_set);\n\n\thiprtFuncDataSet func_data_set;\n\thiprtFuncTable func_table;\n\tHIPRT_CHECK_ERROR(hiprtCreateFuncTable(m_hiprt_orochi_ctx->hiprt_ctx, 1, 1, func_table));\n\tHIPRT_CHECK_ERROR(hiprtSetFuncTable(m_hiprt_orochi_ctx->hiprt_ctx, func_table, 0, 0, func_data_set));\n\n\tm_render_data.hiprt_function_table = func_table;\n}\n\nvoid GPURenderer::step_animations(float delta_time)\n{\n\tm_envmap.update(this, delta_time);\n\tm_camera_animation.animation_step(this, delta_time);\n}\n\nvoid GPURenderer::download_status_buffers()\n{\n\tOROCHI_CHECK_ERROR(oroMemcpy(&m_status_buffers_values.one_ray_active, m_status_buffers.still_one_ray_active_buffer.get_device_pointer(), sizeof(unsigned char), oroMemcpyDeviceToHost));\n\tOROCHI_CHECK_ERROR(oroMemcpy(&m_status_buffers_values.pixel_converged_count, m_status_buffers.pixels_converged_count_buffer.get_device_pointer(), sizeof(unsigned int), oroMemcpyDeviceToHost));\n}\n\nvoid GPURenderer::internal_clear_m_status_buffers()\n{\n\tm_status_buffers_values.one_ray_active = true;\n\tm_status_buffers_values.pixel_converged_count = 0;\n}\n\nbool GPURenderer::needs_global_bvh_stack_buffer()\n{\n\tfor (const auto& name_to_kernel : m_render_thread.get_render_graph().get_tracing_kernels())\n\t{\n\t\tbool global_stack_buffer_needed = false;\n\t\tglobal_stack_buffer_needed |= name_to_kernel.second->get_kernel_options().get_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL) == KERNEL_OPTION_TRUE;\n\n\t\tif (global_stack_buffer_needed)\n\t\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid 
GPURenderer::recreate_global_bvh_stack_buffer()\n{\n\tint nbBlocksX = std::ceil(m_render_resolution.x / (float)KernelBlockWidthHeight) * KernelBlockWidthHeight;\n\tint nbBlocksY = std::ceil(m_render_resolution.y / (float)KernelBlockWidthHeight) * KernelBlockWidthHeight;\n\n\t// Resizing the global stack buffer for BVH traversal\n\thiprtGlobalStackBufferInput stackBufferInput\n\t{\n\t\thiprtStackTypeGlobal,\n\t\thiprtStackEntryTypeInteger,\n\t\tstatic_cast<uint32_t>(m_render_data.global_traversal_stack_buffer_size),\n\t\tstatic_cast<uint32_t>(nbBlocksX * nbBlocksY)\n\t};\n\n\tif (m_render_data.global_traversal_stack_buffer.stackData != nullptr)\n\t\t// Freeing if the buffer already exists\n\t\tHIPRT_CHECK_ERROR(hiprtDestroyGlobalStackBuffer(m_hiprt_orochi_ctx->hiprt_ctx, m_render_data.global_traversal_stack_buffer));\n\n\tHIPRT_CHECK_ERROR(hiprtCreateGlobalStackBuffer(m_hiprt_orochi_ctx->hiprt_ctx, stackBufferInput, m_render_data.global_traversal_stack_buffer));\n}\n\nvoid GPURenderer::synchronize_all_kernels()\n{\n\tif (m_main_stream == nullptr)\n\t\treturn;\n\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_main_stream));\n\t\n\tm_render_thread.wait_on_render_completion();\n}\n\nbool GPURenderer::was_last_frame_low_resolution()\n{\n\treturn m_was_last_frame_low_resolution;\n}\n\nbool GPURenderer::frame_render_done()\n{\n\treturn m_render_thread.frame_render_done();\n}\n\nvoid GPURenderer::resize(int new_width, int new_height)\n{\n\t// Needed so that this function can eventually be called from another thread\n\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\tm_render_resolution = make_int2(new_width, new_height);\n\n\tsynchronize_all_kernels();\n\tunmap_buffers();\n\n\tm_framebuffer->resize(new_width * new_height);\n\tm_denoiser_buffers.m_denoised_framebuffer->resize(new_width * new_height);\n\tm_denoiser_buffers.resize_normals_buffer(new_width * new_height);\n\tm_denoiser_buffers.resize_albedo_buffer(new_width * new_height);\n\n\tif 
(m_render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n\t\tm_pixels_converged_sample_count_buffer->resize(new_width * new_height);\n\n\tif (m_render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n\t{\n\t\tm_pixels_squared_luminance_buffer.resize(new_width * new_height);\n\t\tm_pixels_sample_count_buffer.resize(new_width * new_height);\n\t}\n\n\tm_render_thread.get_render_graph().resize(new_width, new_height);\n\n\tm_pixel_active.resize(new_width * new_height);\n\n\t// Recomputing the perspective projection matrix since the aspect ratio\n\t// may have changed\n\tfloat new_aspect = (float)new_width / new_height;\n\tm_camera.set_aspect(new_aspect);\n\n\tif (needs_global_bvh_stack_buffer())\n\t\trecreate_global_bvh_stack_buffer();\n\n\tm_render_data.render_settings.render_resolution = m_render_resolution;\n\tm_render_data.render_settings.need_to_reset = true;\n\tm_render_data_buffers_invalidated = true;\n}\n\nvoid GPURenderer::render(float delta_time_gpu, RenderWindow* render_window)\n{\n\tm_render_thread.get_render_graph().update_is_render_pass_used();\n\tpre_render_update(delta_time_gpu, render_window);\n\n\t// Mapping the render buffers on the main thread so that we can use them in the render thread.\n\t// \n\t// This is done on the main thread because using OpenGL (required when mapping the buffers from OpenGL to CUDA/HIP)\n\t// on a non-main thread is a bit sketchy\n\tmap_buffers_for_render();\n\n\tif (m_render_data.render_settings.sample_number == 0)\n\t\t// If this is the very first sample, launching the prepass\n\t\t// of all the render passes\n\t\tm_render_thread.get_render_graph().prepass();\n\n\tHIPRTRenderData render_data_for_frame = m_render_data;\n\tGPUKernelCompilerOptions compiler_options_for_frame = m_global_compiler_options->deep_copy();\n\tm_render_thread.request_frame(render_data_for_frame, compiler_options_for_frame);\n}\n\nvoid GPURenderer::pre_render_update(float delta_time, RenderWindow* 
render_window)\n{\n\tm_render_thread.pre_render_update(delta_time, render_window);\n}\n\nvoid GPURenderer::map_buffers_for_render()\n{\n\tm_render_data.buffers.accumulated_ray_colors = m_framebuffer->map();\n\tm_render_data.buffers.gmon_estimator.result_framebuffer = get_gmon_render_pass()->map_result_framebuffer();\n\n\tm_render_data.aux_buffers.denoiser_normals = m_denoiser_buffers.map_normals_buffer();\n\tm_render_data.aux_buffers.denoiser_albedo = m_denoiser_buffers.map_albedo_buffer();\n\tif (m_render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n\t\tm_render_data.aux_buffers.pixel_converged_sample_count = m_pixels_converged_sample_count_buffer->get_device_pointer();\n}\n\nvoid GPURenderer::unmap_buffers()\n{\n\t// TODO we should only unmap buffers that need unmapping here\n\n\tm_framebuffer->unmap();\n\tget_gmon_render_pass()->unmap_result_framebuffer();\n\tm_denoiser_buffers.unmap_normals_buffer();\n\tm_denoiser_buffers.unmap_albedo_buffer();\n}\n\nvoid GPURenderer::set_use_denoiser_AOVs_interop_buffers(bool use_interop) { m_denoiser_buffers.set_use_interop_AOV_buffers(this, use_interop); }\n\nstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> GPURenderer::get_color_interop_framebuffer() \n{ \n\t// TODO use render graph here with render_graph.get_output_framebuffer()\n\tif (get_gmon_render_pass()->is_render_pass_used() && get_gmon_render_pass()->buffers_allocated())\n\t\treturn get_gmon_render_pass()->get_result_framebuffer();\n\telse\n\t\treturn m_framebuffer; \n}\n\nstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> GPURenderer::get_default_interop_framebuffer()\n{\n\treturn m_framebuffer;\n}\n\nstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> GPURenderer::get_denoised_interop_framebuffer() { return m_denoiser_buffers.m_denoised_framebuffer;}\nstd::shared_ptr<OpenGLInteropBuffer<float3>> GPURenderer::get_denoiser_normals_AOV_interop_buffer() \n{\n\tif (!m_denoiser_buffers.use_interop_AOVs)\n\t\t// No using the interop buffers so let's not 
return a buffer that cannot be used\n\t\treturn nullptr;\n\n\treturn m_denoiser_buffers.m_normals_AOV_interop_buffer; \n}\n\nstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> GPURenderer::get_denoiser_albedo_AOV_interop_buffer() \n{ \n\tif (!m_denoiser_buffers.use_interop_AOVs)\n\t\t// No using the interop buffers so let's not return a buffer that cannot be used\n\t\treturn nullptr;\n\n\treturn m_denoiser_buffers.m_albedo_AOV_interop_buffer; \n}\n\nstd::shared_ptr<OrochiBuffer<float3>> GPURenderer::get_denoiser_normals_AOV_no_interop_buffer() { return m_denoiser_buffers.m_normals_AOV_no_interop_buffer; }\nstd::shared_ptr<OrochiBuffer<ColorRGB32F>> GPURenderer::get_denoiser_albedo_AOV_no_interop_buffer() { return m_denoiser_buffers.m_albedo_AOV_no_interop_buffer; }\n\nstd::shared_ptr<OrochiBuffer<int>>& GPURenderer::get_pixels_converged_sample_count_buffer() { return m_pixels_converged_sample_count_buffer; }\nconst StatusBuffersValues& GPURenderer::get_status_buffer_values() const { return m_status_buffers_values; }\n\nHIPRTRenderSettings& GPURenderer::get_render_settings() { return m_render_data.render_settings; }\nstd::shared_ptr<ApplicationSettings> GPURenderer::get_application_settings() { return m_application_settings; }\nWorldSettings& GPURenderer::get_world_settings() { return m_render_data.world_settings; }\nHIPRTRenderData& GPURenderer::get_render_data() { return m_render_data; }\nHIPRTScene& GPURenderer::get_hiprt_scene() { return m_hiprt_scene; }\nstd::shared_ptr<HIPRTOrochiCtx> GPURenderer::get_hiprt_orochi_ctx() { return m_hiprt_orochi_ctx; }\n\nvoid GPURenderer::invalidate_render_data_buffers() \n{ \n\tm_render_data_buffers_invalidated = true; \n}\n\noroDeviceProp GPURenderer::get_device_properties() { return m_device_properties;}\n\nstd::string getDeviceName(oroCtx m_ctxt, oroDevice m_device)\n{\n\toroDeviceProp prop;\n\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_ctxt));\n\tOROCHI_CHECK_ERROR(oroGetDeviceProperties(&prop, m_device));\n\treturn 
std::string(prop.name);\n}\n\nstd::string getGcnArchName(oroCtx m_ctxt, oroDevice m_device)\n{\n\toroDeviceProp prop;\n\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_ctxt));\n\tOROCHI_CHECK_ERROR(oroGetDeviceProperties(&prop, m_device));\n\treturn std::string(prop.gcnArchName);\n}\n\nuint32_t getGcnArchNumber(oroCtx m_ctxt, oroDevice m_device)\n{\n\toroDeviceProp prop;\n\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_ctxt));\n\tOROCHI_CHECK_ERROR(oroGetDeviceProperties(&prop, m_device));\n\treturn prop.gcnArch;\n}\n\nbool enableHwi(oroCtx m_ctxt, oroDevice m_device)\n{\n\tstd::string\t   deviceName = getDeviceName(m_ctxt, m_device);\n\tconst uint32_t archNumber = getGcnArchNumber(m_ctxt, m_device);\n\treturn (archNumber >= 1030 && deviceName.find(\"NVIDIA\") == std::string::npos);\n}\n\nHardwareAccelerationSupport GPURenderer::device_supports_hardware_acceleration()\n{\n\tbool enabled = reinterpret_cast<hiprt::Context*>(m_hiprt_orochi_ctx->hiprt_ctx)->enableHwi();\n\tif (enabled)\n\t\treturn HardwareAccelerationSupport::SUPPORTED;\n\telse\n\t{\n\t\tif (std::string(m_device_properties.name).find(\"NVIDIA\") != std::string::npos)\n\t\t{\n\t\t\t// Not supported on NVIDIA\n\t\t\treturn HardwareAccelerationSupport::NVIDIA_UNSUPPORTED;\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Not NVIDIA but hardware acceleration not supported, assuming too old AMD\n\t\t\treturn HardwareAccelerationSupport::AMD_UNSUPPORTED;\n\t\t}\n\t}\n}\n\nstd::shared_ptr<GPUKernelCompilerOptions> GPURenderer::get_global_compiler_options()\n{\n\treturn m_global_compiler_options;\n}\n\n// Variables used to give the priority to the main thread when compiling shaders\nextern bool g_main_thread_compiling;\nextern std::condition_variable g_condition_for_compilation;\n\nvoid GPURenderer::recompile_kernels(bool use_cache)\n{\n\tsynchronize_all_kernels();\n\n\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Recompiling kernels...\");\n\n\t// Notifying all threads that may be compiling that the main thread wants 
to\n\t// compile. This will block threads other than the main thread from compiling\n\t// and thus give the priority to the main thread\n\n\tm_render_thread.get_render_graph().recompile(m_hiprt_orochi_ctx, m_func_name_sets, false, use_cache);\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> GPURenderer::get_all_kernels()\n{\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> kernels;\n\n\tfor (auto& name_to_kernel : m_render_thread.get_render_graph().get_all_kernels())\n\t\tkernels[name_to_kernel.first] = name_to_kernel.second;\n\n\treturn kernels;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> GPURenderer::get_tracing_kernels()\n{\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> kernels;\n\n\tfor (auto& name_to_kernel : m_render_thread.get_render_graph().get_tracing_kernels())\n\t\tkernels[name_to_kernel.first] = name_to_kernel.second;\n\n\treturn kernels;\n}\n\nvoid GPURenderer::set_debug_trace_kernel(const std::string& kernel_name, GPUKernelCompilerOptions options)\n{\n\tif (kernel_name == \"\")\n\t\t// Clearing the debug kernel\n\t\tm_render_thread.get_debug_trace_kernel() = GPUKernel();\n\telse\n\t{\n\t\tm_render_thread.get_debug_trace_kernel() = GPUKernel(DEVICE_KERNELS_DIRECTORY \"/\" + kernel_name + \".h\", kernel_name);\n\n\t\t// Setting all the custom options\n\t\tm_render_thread.get_debug_trace_kernel().get_kernel_options() = options;\n\t\tm_render_thread.get_debug_trace_kernel().compile(m_hiprt_orochi_ctx);\n\t}\n}\n\nbool GPURenderer::is_using_debug_kernel()\n{\n\treturn m_render_thread.get_debug_trace_kernel().has_been_compiled();\n}\n\noroStream_t GPURenderer::get_main_stream()\n{\n\treturn m_main_stream;\n}\n\nvoid GPURenderer::compute_render_pass_times()\n{\n\t// Registering the render times of all the kernels by iterating over all the kernels\n\tm_render_thread.get_render_graph().compute_render_times();\n\n\tif (m_render_thread.get_debug_trace_kernel().has_been_compiled())\n\t\t// If the debug kernel is being used... 
read its execution time\n\t\t// Note that we check for 'has_been_compiled()' because if the debug kernel isn't in use,\n\t\t// then the kernel (m_render_thread.get_debug_trace_kernel()) is empty, and if it's empty, then it hasn't\n\t\t// been compiled yet\n\t\tm_render_pass_times[GPURenderer::DEBUG_KERNEL_TIME_KEY] = m_render_thread.get_debug_trace_kernel().compute_execution_time();\n\n\tm_render_pass_times[GPURenderer::ALL_RENDER_PASSES_TIME_KEY] = m_render_thread.get_render_graph().get_full_frame_time();\n}\n\nstd::unordered_map<std::string, float>& GPURenderer::get_render_pass_times()\n{\n\treturn m_render_pass_times;\n}\n\nfloat GPURenderer::get_last_frame_time()\n{\n\treturn m_render_pass_times[GPURenderer::ALL_RENDER_PASSES_TIME_KEY];\n}\n\nvoid GPURenderer::update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics)\n{\n\tcompute_render_pass_times();\n\n\tm_render_thread.get_render_graph().update_perf_metrics(perf_metrics);\n\n\tperf_metrics->add_value(GPURenderer::ALL_RENDER_PASSES_TIME_KEY, m_render_pass_times[GPURenderer::ALL_RENDER_PASSES_TIME_KEY]);\n\n\tif (m_render_thread.get_debug_trace_kernel().has_been_compiled())\n\t\t// Adding the time for the debug kernel if it is in use\n\t\tperf_metrics->add_value(GPURenderer::DEBUG_KERNEL_TIME_KEY, m_render_pass_times[GPURenderer::DEBUG_KERNEL_TIME_KEY]);\n}\n\nvoid GPURenderer::reset(bool reset_by_camera_movement)\n{\n\tm_DEBUG_SUMS.memset_whole_buffer(0);\n\tm_DEBUG_SUM_COUNT.memset_whole_buffer(0);\n\n\tif (m_render_data.render_settings.accumulate)\n\t{\n\t\t// Only resetting the seed for deterministic rendering if we're accumulating.\n\t\t// If we're not accumulating, we want each frame of the render to be different\n\t\t// so we don't get into that if block and we don't reset the seed\n\t\tm_rng.m_state.seed = 42;\n\t\tm_render_data.random_number = 42;\n\t\tm_render_data.render_settings.need_to_reset = true;\n\t}\n\n\tinternal_clear_m_status_buffers();\n\n\tbool 
moving_camera_while_not_accumulating = reset_by_camera_movement && !m_render_data.render_settings.accumulate;\n\tif (!moving_camera_while_not_accumulating)\n\t\tm_render_data.render_settings.need_to_reset = true;\n\n\tm_render_thread.get_render_graph().reset(reset_by_camera_movement);\n}\n\nXorshift32Generator& GPURenderer::get_rng_generator()\n{\n\treturn m_rng;\n}\n\nvoid GPURenderer::update_render_data()\n{\n\tif (m_render_data_buffers_invalidated)\n\t{\n\t\tm_render_data.GPU_BVH = m_hiprt_scene.whole_scene_BLAS.m_geometry;\n\t\tm_render_data.light_GPU_BVH = m_hiprt_scene.emissive_triangles_BLAS.m_geometry;\n\n\t\tm_render_data.render_settings.DEBUG_SUM_TOTAL = m_DEBUG_SUMS.get_atomic_device_pointer();\n\t\tm_render_data.render_settings.DEBUG_SUM_COUNT = m_DEBUG_SUM_COUNT.get_atomic_device_pointer();\n\n\t\tm_render_data.buffers.triangles_indices = reinterpret_cast<int*>(m_hiprt_scene.whole_scene_BLAS.m_mesh.triangleIndices);\n\t\tm_render_data.buffers.vertices_positions = reinterpret_cast<float3*>(m_hiprt_scene.whole_scene_BLAS.m_mesh.vertices);\n\t\tm_render_data.buffers.has_vertex_normals = m_hiprt_scene.has_vertex_normals.get_device_pointer();\n\t\tm_render_data.buffers.vertex_normals = m_hiprt_scene.vertex_normals.get_device_pointer();\n\t\t// m_render_data.buffers.precomputed_emissive_triangles_data = PrecomputedEmissiveTrianglesDataSoAHostHelpers::to_device(m_hiprt_scene.precomputed_emissive_triangles_data);\n\n\t\tm_render_data.buffers.material_indices = m_hiprt_scene.material_indices.get_device_pointer();\n\t\tm_render_data.buffers.materials_buffer = m_hiprt_scene.materials_buffer.get_device_SoA_struct();\n\t\tm_render_data.buffers.material_opaque = m_hiprt_scene.material_opaque.get_device_pointer();\n\t\tm_render_data.buffers.emissive_triangles_count = m_hiprt_scene.emissive_triangles_count;\n\t\tif (m_hiprt_scene.emissive_triangles_primitive_indices.size() > 0)\n\t\t\tm_render_data.buffers.emissive_triangles_primitive_indices = 
reinterpret_cast<int*>(m_hiprt_scene.emissive_triangles_primitive_indices.get_device_pointer());\n\t\tif (m_hiprt_scene.emissive_triangles_indices_and_emissive_textures.size() > 0)\n\t\t\tm_render_data.buffers.emissive_triangles_primitive_indices_and_emissive_textures = reinterpret_cast<int*>(m_hiprt_scene.emissive_triangles_indices_and_emissive_textures.get_device_pointer());\n\t\tm_render_data.buffers.triangles_areas = m_hiprt_scene.triangle_areas.get_device_pointer();\n\t\tif (m_hiprt_scene.gpu_materials_textures.size() > 0)\n\t\t\tm_render_data.buffers.material_textures = m_hiprt_scene.gpu_materials_textures.get_device_pointer();\n\t\tif (m_hiprt_scene.texcoords_buffer.size() > 0)\n\t\t\tm_render_data.buffers.texcoords = reinterpret_cast<float2*>(m_hiprt_scene.texcoords_buffer.get_device_pointer());\n\n\t\tm_render_data.bsdfs_data.sheen_ltc_parameters_texture = m_sheen_ltc_params.get_device_texture();\n\t\tm_render_data.bsdfs_data.GGX_conductor_directional_albedo = m_GGX_conductor_directional_albedo.get_device_texture();\n\t\tm_render_data.bsdfs_data.glossy_dielectric_directional_albedo = m_glossy_dielectric_directional_albedo.get_device_texture();\n\t\tm_render_data.bsdfs_data.GGX_glass_directional_albedo = m_GGX_glass_directional_albedo.get_device_texture();\n\t\tm_render_data.bsdfs_data.GGX_glass_directional_albedo_inverse = m_GGX_glass_inverse_directional_albedo.get_device_texture();\n\t\tm_render_data.bsdfs_data.GGX_thin_glass_directional_albedo = m_GGX_thin_glass_directional_albedo.get_device_texture();\n\n\t\tif (m_render_data.render_settings.has_access_to_adaptive_sampling_buffers())\n\t\t{\n\t\t\tm_render_data.aux_buffers.pixel_sample_count = m_pixels_sample_count_buffer.get_device_pointer();\n\t\t\tm_render_data.aux_buffers.pixel_squared_luminance = m_pixels_squared_luminance_buffer.get_device_pointer();\n\t\t}\n\n\t\tm_render_data.aux_buffers.pixel_active = m_pixel_active.get_device_pointer();\n\t\tm_render_data.aux_buffers.still_one_ray_active = 
m_status_buffers.still_one_ray_active_buffer.get_device_pointer();\n\t\tm_render_data.aux_buffers.pixel_count_converged_so_far = m_status_buffers.pixels_converged_count_buffer.get_atomic_device_pointer();\n\n\t\tm_render_thread.get_render_graph().update_render_data();\n\n\t\tm_render_data_buffers_invalidated = false;\n\t}\n}\n\nvoid GPURenderer::set_hiprt_scene_from_scene(const Scene& scene)\n{\n\tif (scene.triangles_vertex_indices.size() == 0)\n\t\t// Empty scene, nothing todo\n\t\treturn;\n\n\tm_hiprt_scene.whole_scene_BLAS.upload_triangle_indices(scene.triangles_vertex_indices);\n\tm_hiprt_scene.whole_scene_BLAS.upload_vertices_positions(scene.vertices_positions);\n\tm_hiprt_scene.whole_scene_BLAS.m_hiprt_ctx = m_hiprt_orochi_ctx->hiprt_ctx;\n\trebuild_bvh(m_hiprt_scene.whole_scene_BLAS, hiprtBuildFlagBitPreferHighQualityBuild, true, true);\n\n\tm_hiprt_scene.emissive_triangles_BLAS.upload_triangle_indices(scene.emissive_triangle_vertex_indices);\n\tm_hiprt_scene.emissive_triangles_BLAS.copy_vertices_positions_from(m_hiprt_scene.whole_scene_BLAS);\n\tm_hiprt_scene.emissive_triangles_BLAS.m_hiprt_ctx = m_hiprt_orochi_ctx->hiprt_ctx;\n\trebuild_bvh(m_hiprt_scene.emissive_triangles_BLAS, hiprtBuildFlagBitPreferHighQualityBuild, true, true);\n\n\tm_hiprt_scene.has_vertex_normals.resize(scene.has_vertex_normals.size());\n\tm_hiprt_scene.has_vertex_normals.upload_data(scene.has_vertex_normals.data());\n\n\tm_hiprt_scene.vertex_normals.resize(scene.vertex_normals.size());\n\tm_hiprt_scene.vertex_normals.upload_data(scene.vertex_normals.data());\n\n\tm_hiprt_scene.material_indices.resize(scene.material_indices.size());\n\tm_hiprt_scene.material_indices.upload_data(scene.material_indices.data());\n\n\t// Uploading the materials after the textures have been parsed because texture\n\t// parsing can modify the materials (emission of constant textures are stored in the\n\t// material directly for example) so we need to wait for the end of texture parsing\n\t// to upload the 
materials\n\tThreadManager::add_dependency(ThreadManager::RENDERER_UPLOAD_MATERIALS, ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY);\n\tThreadManager::start_thread(ThreadManager::RENDERER_UPLOAD_MATERIALS, [this, &scene]()\n\t{\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\tstd::vector<DevicePackedTexturedMaterial> packed_gpu_materials(scene.materials.size());\n\t\tfor (int i = 0; i < scene.materials.size(); i++)\n\t\t\tpacked_gpu_materials[i] = scene.materials[i].pack_to_GPU();\n\n\t\tm_hiprt_scene.materials_buffer.resize(scene.materials.size());\n\t\tm_hiprt_scene.materials_buffer.upload_data(packed_gpu_materials);\n\n\t\t// Computing the opaqueness of materials i.e. whether or not they are FULLY opaque\n\t\tstd::vector<unsigned char> material_opaque(scene.materials.size());\n\t\tfor (int i = 0; i < scene.materials.size(); i++)\n\t\t\tmaterial_opaque[i] = scene.material_has_opaque_base_color_texture[i] && scene.materials[i].alpha_opacity == 1.0f;\n\t\tm_hiprt_scene.material_opaque.resize(material_opaque.size());\n\t\tm_hiprt_scene.material_opaque.upload_data(material_opaque);\n\t\tm_hiprt_scene.material_has_opaque_base_color_texture = scene.material_has_opaque_base_color_texture;\n\n\t\tm_hiprt_scene.texcoords_buffer.resize(scene.texcoords.size());\n\t\tm_hiprt_scene.texcoords_buffer.upload_data(scene.texcoords.data());\n\t});\n\n\tThreadManager::add_dependency(ThreadManager::RENDERER_UPLOAD_TRIANGLE_AREAS, ThreadManager::SCENE_LOADING_COMPUTE_TRIANGLE_AREAS);\n\tThreadManager::start_thread(ThreadManager::RENDERER_UPLOAD_TRIANGLE_AREAS, [this, &scene]()\n\t{\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\tm_hiprt_scene.triangle_areas.resize(scene.triangle_areas.size());\n\t\tm_hiprt_scene.triangle_areas.upload_data(scene.triangle_areas.data());\n\t});\n\n\tThreadManager::add_dependency(ThreadManager::RENDERER_UPLOAD_TEXTURES, 
ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY);\n\tThreadManager::start_thread(ThreadManager::RENDERER_UPLOAD_TEXTURES, [this, &scene]() \n\t{\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\tif (scene.textures.size() > 0)\n\t\t{\n\t\t\tstd::vector<oroTextureObject_t> oro_textures(scene.textures.size());\n\t\t\tm_hiprt_scene.orochi_materials_textures.reserve(scene.textures.size());\n\t\t\tfor (int i = 0; i < scene.textures.size(); i++)\n\t\t\t{\n\t\t\t\tif (scene.textures[i].width == 0 || scene.textures[i].height == 0)\n\t\t\t\t{\n\t\t\t\t\t// It can happen that for emissive textures for example, we had a texture but its color is constant.\n\t\t\t\t\t// As a result, we have not read the texture but rather just stored the constant emissive color in the\n\t\t\t\t\t// emission filed of the material so we have no texture to read here\n\n\t\t\t\t\t// The shader will never read from that texture (because the texture index of the material has been set to -1)\n\t\t\t\t\t// so we set it to nullptr\n\t\t\t\t\toro_textures[i] = nullptr;\n\n\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\t// We need to keep the texture alive so they are not destroyed when returning from \n\t\t\t\t// this function so we're adding them to a member buffer\n\t\t\t\tm_hiprt_scene.orochi_materials_textures.push_back(OrochiTexture(scene.textures[i], hipFilterModeLinear));\n\n\t\t\t\toro_textures[i] = m_hiprt_scene.orochi_materials_textures.back().get_device_texture();\n\t\t\t}\n\n\t\t\tm_hiprt_scene.gpu_materials_textures.resize(oro_textures.size());\n\t\t\tm_hiprt_scene.gpu_materials_textures.upload_data(oro_textures.data());\n\t\t}\n\t});\n\n\tThreadManager::add_dependency(ThreadManager::RENDERER_UPLOAD_EMISSIVE_TRIANGLES, ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES);\n\tThreadManager::start_thread(ThreadManager::RENDERER_UPLOAD_EMISSIVE_TRIANGLES, [this, &scene]() \n\t{\n\t\tm_hiprt_scene.emissive_triangles_count = 
scene.emissive_triangles_primitive_indices.size();\n\t\tif (m_hiprt_scene.emissive_triangles_count > 0)\n\t\t{\n\t\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\t\tm_hiprt_scene.emissive_triangles_primitive_indices.resize(scene.emissive_triangles_primitive_indices.size());\n\t\t\tm_hiprt_scene.emissive_triangles_primitive_indices.upload_data(scene.emissive_triangles_primitive_indices.data());\n\n\t\t\tm_hiprt_scene.emissive_triangles_indices_and_emissive_textures.resize(scene.emissive_triangles_primitive_indices_and_emissive_textures.size());\n\t\t\tm_hiprt_scene.emissive_triangles_indices_and_emissive_textures.upload_data(scene.emissive_triangles_primitive_indices_and_emissive_textures.data());\n\t\t}\n\n\t\t/*m_hiprt_scene.precomputed_emissive_triangles_data.resize(m_hiprt_scene.emissive_triangles_count);\n\t\tm_hiprt_scene.precomputed_emissive_triangles_data.template upload_to_buffer<PrecomputedEmissiveTrianglesDataSoAHostHelpers::VERTEX_A_BUFFER>(scene.triangle_A);\n\t\tm_hiprt_scene.precomputed_emissive_triangles_data.template upload_to_buffer<PrecomputedEmissiveTrianglesDataSoAHostHelpers::AB_BUFFER>(scene.triangle_AB);\n\t\tm_hiprt_scene.precomputed_emissive_triangles_data.template upload_to_buffer<PrecomputedEmissiveTrianglesDataSoAHostHelpers::AC_BUFFER>(scene.triangle_AC);*/\n\t});\n}\n\nvoid GPURenderer::rebuild_bvh(HIPRTGeometry& geometry, hiprtBuildFlags build_flags, bool do_compaction, bool disable_spatial_splits_on_OOM)\n{\n\tgeometry.build_bvh(build_flags, do_compaction, disable_spatial_splits_on_OOM, m_main_stream);\n}\n\nvoid GPURenderer::rebuild_whole_scene_bvh(hiprtBuildFlags build_flags, bool do_compaction, bool disable_spatial_splits_on_OOM)\n{\n\trebuild_bvh(m_hiprt_scene.whole_scene_BLAS, build_flags, do_compaction, disable_spatial_splits_on_OOM);\n}\n\nvoid GPURenderer::set_scene(const Scene& 
scene)\n{\n\tset_hiprt_scene_from_scene(scene);\n\tcompute_emissives_power_alias_table(scene);\n\n\tm_original_materials = scene.materials;\n\tm_current_materials = scene.materials;\n\tm_parsed_scene_metadata = scene.metadata;\n}\n\nvoid GPURenderer::set_envmap(const Image32Bit& envmap_image, const std::string& envmap_filepath)\n{\n\tThreadManager::add_dependency(ThreadManager::RENDERER_SET_ENVMAP, ThreadManager::ENVMAP_LOAD_FROM_DISK_THREAD);\n\tThreadManager::start_thread(ThreadManager::RENDERER_SET_ENVMAP, [this, &envmap_image, &envmap_filepath]() {\n\t\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_hiprt_orochi_ctx->orochi_ctx));\n\n\t\tif (envmap_image.width == 0 || envmap_image.height == 0)\n\t\t{\n\t\t\tif (m_render_data.world_settings.ambient_light_type == AmbientLightType::ENVMAP)\n\t\t\t\t// We were going for the envmap but it's not available so defaulting to\n\t\t\t\t// uniform lighting instead\n\t\t\t\tm_render_data.world_settings.ambient_light_type = AmbientLightType::UNIFORM;\n\n\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Empty envmap set on the GPURenderer... 
Defaulting to uniform ambient light instead.\");\n\n\t\t\treturn;\n\t\t}\n\n\t\tm_envmap.init_from_image(envmap_image, envmap_filepath);\n\t\tm_envmap.recompute_sampling_data_structure(this, &envmap_image);\n\n\t\tm_render_data.world_settings.envmap = m_envmap.get_packed_data_pointer();\n\t\tm_render_data.world_settings.envmap_width = m_envmap.get_width();\n\t\tm_render_data.world_settings.envmap_height = m_envmap.get_height();\n\t\t// We found an envmap so let's use it\n\t\tm_render_data.world_settings.ambient_light_type = AmbientLightType::ENVMAP;\n\n#if EnvmapSamplingStrategy == ESS_BINARY_SEARCH\n\t\tm_render_data.world_settings.envmap_cdf = m_envmap.get_cdf_device_pointer();\n\n\t\tm_render_data.world_settings.alias_table_probas = nullptr;\n\t\tm_render_data.world_settings.alias_table_alias = nullptr;\n#elif EnvmapSamplingStrategy == ESS_ALIAS_TABLE\n\t\tm_render_data.world_settings.envmap_cdf = nullptr;\n\n\t\tm_envmap.get_alias_table_device_pointers(m_render_data.world_settings.envmap_alias_table.alias_table_probas, m_render_data.world_settings.envmap_alias_table.alias_table_alias);\n#endif\n\t});\n}\n\nbool GPURenderer::has_envmap()\n{\n\treturn m_render_data.world_settings.envmap_height != 0 && m_render_data.world_settings.envmap_width != 0;\n}\n\nconst std::vector<CPUMaterial>& GPURenderer::get_original_materials()\n{\n\treturn m_original_materials;\n}\n\nconst std::vector<CPUMaterial>& GPURenderer::get_current_materials()\n{\n\treturn m_current_materials;\n}\n\nconst std::vector<std::string>& GPURenderer::get_material_names()\n{\n\treturn m_parsed_scene_metadata.material_names;\n}\n\nvoid GPURenderer::update_all_materials(std::vector<CPUMaterial>& materials)\n{\n\tm_current_materials = materials;\n\n\tstd::vector<unsigned char> new_opacity(materials.size());\n\tstd::vector<DevicePackedTexturedMaterial> packed_gpu_materials(materials.size());\n\tfor (int i = 0; i < materials.size(); i++)\n\t{\n\t\tpacked_gpu_materials[i] = 
materials[i].pack_to_GPU();\n\n\t\t// The material is fully opaque if its base color texture is fully opaque\n\t\t// and if the alpha opacity is fully opaque too (1.0f)\n\t\tnew_opacity[i] = materials[i].alpha_opacity == 1.0f && m_hiprt_scene.material_has_opaque_base_color_texture[i];\n\t}\n\n\t// Because the materials have changed, reuploading the \"precomputed oapcity\" of the materials\n\tm_hiprt_scene.material_opaque.upload_data(new_opacity);\n\tm_hiprt_scene.materials_buffer.upload_data(packed_gpu_materials);\n}\n\nvoid GPURenderer::update_one_material(CPUMaterial& material, int material_index)\n{\n\tm_current_materials[material_index] = material;\n\n\tDevicePackedTexturedMaterial packed_gpu_material = material.pack_to_GPU();\n\t// The material is fully opaque if its base color texture is fully opaque\n\t// and if the alpha opacity is fully opaque too (1.0f)\n\tunsigned char new_opacity = material.alpha_opacity == 1.0f && m_hiprt_scene.material_has_opaque_base_color_texture[material_index];\n\n\t// Because the materials have changed, reuploading the \"precomputed oapcity\" of the materials\n\tm_hiprt_scene.material_opaque.upload_data_partial(material_index, &new_opacity, 1);\n\tm_hiprt_scene.materials_buffer.upload_data_partial(material_index, &packed_gpu_material, 1);\n}\n\n\nconst std::vector<BoundingBox>& GPURenderer::get_mesh_bounding_boxes()\n{\n\treturn m_parsed_scene_metadata.mesh_bounding_boxes;\n}\n\nconst std::vector<std::string>& GPURenderer::get_mesh_names()\n{\n\treturn m_parsed_scene_metadata.mesh_names;\n}\n\nconst std::vector<int>& GPURenderer::get_mesh_material_indices()\n{\n\treturn m_parsed_scene_metadata.mesh_material_indices;\n}\n\nCamera& GPURenderer::get_camera()\n{\n\treturn m_camera;\n}\n\nCamera& GPURenderer::get_previous_frame_camera()\n{\n\treturn m_previous_frame_camera;\n}\n\nCameraAnimation& GPURenderer::get_camera_animation()\n{\n\treturn m_camera_animation;\n}\n\nRendererEnvmap& GPURenderer::get_envmap()\n{\n\treturn 
m_envmap;\n}\n\nSceneMetadata& GPURenderer::get_scene_metadata()\n{\n\treturn m_parsed_scene_metadata;\n}\n\nRenderGraph& GPURenderer::get_render_graph()\n{\n\treturn m_render_thread.get_render_graph();\n}\n\nvoid GPURenderer::set_camera(const Camera& camera)\n{\n\tm_camera = camera;\n\tm_camera_animation.set_camera(&m_camera);\n}\n\nvoid GPURenderer::resize_g_buffer_ray_volume_states()\n{\n\tstd::dynamic_pointer_cast<FillGBufferRenderPass>(m_render_thread.get_render_graph().get_render_pass(FillGBufferRenderPass::FILL_GBUFFER_RENDER_PASS_NAME))->resize_g_buffer_ray_volume_states();\n}\n\nvoid GPURenderer::translate_camera_view(glm::vec3 translation)\n{\n\tm_camera.translate(translation);\n}\n\nvoid GPURenderer::rotate_camera_view(glm::vec3 rotation_angles)\n{\n\tm_camera.rotate(rotation_angles);\n}\n\nvoid GPURenderer::zoom_camera_view(float offset)\n{\n\tm_camera.zoom(offset);\n}\n\nRendererAnimationState& GPURenderer::get_animation_state()\n{\n\treturn m_animation_state;\n}\n"
  },
  {
    "path": "src/Renderer/GPURenderer.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_RENDERER_H\n#define GPU_RENDERER_H\n\n#include \"Compiler/GPUKernel.h\"\n#include \"Device/kernel_parameters/ReSTIR/DI/LightPresamplingParameters.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HIPRT-Orochi/OrochiTexture3D.h\"\n#include \"HIPRT-Orochi/HIPRTScene.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"Renderer/GPUDataStructures/DenoiserBuffersGPUData.h\"\n#include \"Renderer/GPUDataStructures/StatusBuffersGPUData.h\"\n#include \"Renderer/GPURendererThread.h\"\n#include \"Renderer/HardwareAccelerationSupport.h\"\n#include \"Renderer/OpenImageDenoiser.h\"\n#include \"Renderer/RendererAnimationState.h\"\n#include \"Renderer/RendererEnvmap.h\"\n#include \"Renderer/RenderPasses/GMoNRenderPass.h\"\n#include \"Renderer/RenderPasses/RenderGraph.h\"\n#include \"Renderer/RenderPasses/NEEPlusPlusRenderPass.h\"\n#include \"Renderer/RenderPasses/ReGIRRenderPass.h\"\n#include \"Renderer/RenderPasses/ReSTIRDIRenderPass.h\"\n#include \"Renderer/RenderPasses/ReSTIRGIRenderPass.h\"\n#include \"Renderer/StatusBuffersValues.h\"\n#include \"Scene/Camera.h\"\n#include \"Scene/CameraAnimation.h\"\n#include \"Scene/SceneParser.h\"\n#include \"UI/ApplicationSettings.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n\n#include <unordered_map>\n#include <vector>\n\nclass RenderWindow;\n\ntemplate <typename T>\nclass OpenGLInteropBuffer;\n\nclass GPURenderer\n{\npublic:\n\t// List of compiler options that will be specific to each kernel. 
We don't want these options\n\t// to be synchronized between kernels\n\tstatic const std::unordered_set<std::string> KERNEL_OPTIONS_NOT_SYNCHRONIZED;\n\n\t// Key for indexing m_render_pass_times that contains the times per passes\n\t// This key is for the time of the whole frame\n\tstatic const std::string ALL_RENDER_PASSES_TIME_KEY;\n\tstatic const std::string FULL_FRAME_TIME_WITH_CPU_KEY;\n\tstatic const std::string DEBUG_KERNEL_TIME_KEY;\n\n\t/**\n\t * Constructs a renderer that will be using the given HIPRT/Orochi\n\t * context for handling GPU acceleration structures, buffers, textures, etc...\n\t */\n\tGPURenderer(RenderWindow* render_window, std::shared_ptr<HIPRTOrochiCtx> hiprt_oro_ctx, std::shared_ptr<ApplicationSettings> application_settings);\n\t~GPURenderer();\n\n\tvoid start_render_thread();\n\n\tvoid setup_brdfs_data();\n\n\t/**\n\t * Initializes and uploads the fitted parameters for the LTC sheen lobe\n\t * of the Principled BSDF\n\t */\n\tvoid init_sheen_ltc_texture();\n\n\t/**\n \t * Initializes the precomputed texture used for GGX energy conservation\n\t */\n\tvoid load_GGX_energy_compensation_textures(hipTextureFilterMode filtering_mode = hipFilterModeLinear);\n\n\t/**\n\t * Initializes the precomputed texture used for glossy dielectrics \n\t * energy conservation\n\t */\n\tvoid load_glossy_dielectric_energy_compensation_textures(hipTextureFilterMode filtering_mode = hipFilterModePoint);\n\n\t/**\n\t * Initializes the precomputed textures used for GGX glass BSDF energy conservation\n\t */\n\tvoid load_GGX_glass_energy_compensation_textures(hipTextureFilterMode filtering_mode = hipFilterModePoint);\n\n\t/**\n\t * Computes the alias table for sampling emissive triangles according to power\n\t */\n\tvoid compute_emissives_power_alias_table(const Scene& scene);\n\t/**\n\t * Overload for computing the alias table at runtime.\n\t * This will read the data from the GPU and is thus slower than the overload with the 'scene' parameter\n\t * \n\t * This 
function is mainly used because at runtime, we don't have the scene data anymore since it's been freed\n\t * from the CPU to save on RAM\n\t */\n\tvoid recompute_emissives_power_alias_table();\n\tvoid free_emissives_power_alias_table();\n\tbool needs_emissives_power_alias_table();\n\n\tstd::shared_ptr<GMoNRenderPass> get_gmon_render_pass();\n\tstd::shared_ptr<NEEPlusPlusRenderPass> get_NEE_plus_plus_render_pass();\n\tstd::shared_ptr<ReGIRRenderPass> get_ReGIR_render_pass();\n\tstd::shared_ptr<ReSTIRDIRenderPass> get_ReSTIR_DI_render_pass();\n\tstd::shared_ptr<ReSTIRGIRenderPass> get_ReSTIR_GI_render_pass();\n\n\tNEEPlusPlusHashGridStorage& get_nee_plus_plus_storage();\n\n\t/**\n\t * Initializes the filter function used by the kernels\n\t */\n\tvoid setup_filter_functions();\n\n\t/**\n\t * Steps all the animations of the renderer one step forward\n\t * \n\t * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n\t * call to step_animations()\n\t */\n\tvoid step_animations(float delta_time);\n\n\t/**\n\t * Blocking that waits for all the operations queued on\n\t * the main stream to complete\n\t */\n\tvoid synchronize_all_kernels();\n\n\t/**\n\t * Returns true if the last frame was rendered with render_settings.wants_render_low_resolution = true.\n\t * False otherwise\n\t */\n\tbool was_last_frame_low_resolution();\n\n\t/**\n\t * Whether or not the current frame has finished rendering\n\t */\n\tbool frame_render_done();\n\n\t/**\n\t * Resizes all the buffers of the renderer to the given new width and height\n\t */\n\tvoid resize(int new_width, int new_height);\n\n\t/**\n\t * Requests a frame to the render thread\n\t */\n\tvoid render(float delta_time_gpu, RenderWindow* render_window);\n\n\t/**\n\t * This function is in charge of updating various \"dynamic attributes/properties/buffers\" of the renderer before rendering a frame.\n\t *\n\t * These \"dynamic attributes/properties/buffers\" can be the adaptive sampling buffers for 
example.\n\t *\n\t * It will be checked each frame whether or not the adaptive sampling buffers need to be\n\t * allocated or freed and action will be taken accordingly. This function basically enables a\n\t * nice behavior of the application in which the renderer \"automatically\" reacts to changes\n\t * that could be made (through the ImGui interface for example) so that it is always in the\n\t * correct state. Said otherwise, this function can be seen as a centralized place for updating\n\t * various stuff of the renderer instead of having to scatter these update calls everywhere\n\t * in the code.\n\t *\n\t * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n\t * call to pre_render_update()\n\t */\n\tvoid pre_render_update(float delta_time, RenderWindow* render_window);\n\n\t/**\n\t * Maps the buffers shared with OpenGL that are needed for rendering the frame and sets\n\t * their mapped pointer into m_render_data\n\t */\n\tvoid map_buffers_for_render();\n\n\t/**\n\t * Unmap the color framebuffer, the denoiser albedo and the\n\t * denoiser normals buffers so that OpenGL can use them\n\t */\n\tvoid unmap_buffers();\n\n\t/**\n\t * Switches to using OpenGL interop buffers or classical non-interop CUDA/HIP buffers for the denoiser AOVs.\n\t * \n\t * OpenGL Interop buffers seems to be slower and slow down the path tracing kernel on AMD for some reasons.\n\t * Using non-interop buffers thus increases rendering performance but tanks denoising performance\n\t */\n\tvoid set_use_denoiser_AOVs_interop_buffers(bool use_interop);\n\n\t/**\n\t * Returns the framebuffer that should be used for displaying to the viewport\n\t * \n\t * At the time of writing this comment, this is either the default framebuffer where the\n\t * ray colors are accumulated or the GMoN result framebuffer where the median of means are\n\t * computed for fireflies reduction\n\t */\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> 
get_color_interop_framebuffer();\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> get_default_interop_framebuffer();\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> get_denoised_interop_framebuffer();\n\tstd::shared_ptr<OpenGLInteropBuffer<float3>> get_denoiser_normals_AOV_interop_buffer();\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> get_denoiser_albedo_AOV_interop_buffer();\n\tstd::shared_ptr<OrochiBuffer<float3>> get_denoiser_normals_AOV_no_interop_buffer();\n\tstd::shared_ptr<OrochiBuffer<ColorRGB32F>> get_denoiser_albedo_AOV_no_interop_buffer();\n\tstd::shared_ptr<OrochiBuffer<int>>& get_pixels_converged_sample_count_buffer();\n\t/**\n\t * Returns a structure that contains the values of\n\t * various one-variable buffers of the renderer such\n\t * as 'one_ray_active' or 'pixel_converged_count' for example\n\t */\n\tconst StatusBuffersValues& get_status_buffer_values() const;\n\t/**\n\t * Memcpy the values of the status buffers to m_status_buffer_values\n\t */\n\tvoid download_status_buffers();\n\n\tHIPRTRenderSettings& get_render_settings();\n\tstd::shared_ptr<ApplicationSettings> get_application_settings();\n\tWorldSettings& get_world_settings();\n\tHIPRTRenderData& get_render_data();\n\tHIPRTScene& get_hiprt_scene();\n\tstd::shared_ptr<HIPRTOrochiCtx> get_hiprt_orochi_ctx();\n\tvoid invalidate_render_data_buffers();\n\n\tCamera& get_camera();\n\tCamera& get_previous_frame_camera();\n\tCameraAnimation& get_camera_animation();\n\tRendererEnvmap& get_envmap();\n\tSceneMetadata& get_scene_metadata();\n\tRenderGraph& get_render_graph();\n\n\tvoid set_scene(const Scene& scene);\n\n\tvoid rebuild_bvh(HIPRTGeometry& geometry, hiprtBuildFlags build_flags, bool do_compaction, bool disable_spatial_splits_on_OOM);\n\t/**\n\t * The 'disable_spatial_splits_on_OOM' parameter will try to rebuild the BVH without spatial splits if the first try\n\t * failed because of not enough VRAM\n\t */\n\tvoid rebuild_whole_scene_bvh(hiprtBuildFlags build_flags, bool 
do_compaction, bool disable_spatial_splits_on_OOM = false);\n\tvoid set_camera(const Camera& camera);\n\tvoid set_envmap(const Image32Bit& envmap, const std::string& envmap_filepath);\n\tbool has_envmap();\n\n\tconst std::vector<CPUMaterial>& get_original_materials();\n\tconst std::vector<CPUMaterial>& get_current_materials();\n\tconst std::vector<std::string>& get_material_names();\n\n\t/**\n\t * Updates all the materials of the renderer and reuploads them all to the GPU\n\t */\n\tvoid update_all_materials(std::vector<CPUMaterial>& materials);\n\t/**\n\t * Updates only the material with index 'material_index' and uploads it to the GPU \n\t */\n\tvoid update_one_material(CPUMaterial& material, int material_index);\n\n\tconst std::vector<BoundingBox>& get_mesh_bounding_boxes();\n\tconst std::vector<std::string>& get_mesh_names();\n\tconst std::vector<int>& get_mesh_material_indices();\n\n\t/**\n\t * Returns the size of the RayVolumeState struct on the GPU.\n\t * \n\t * Useful when the size of the struct changes because the nested dielectrics\n\t * stack size changed but we have no easy way to find out what's the new size\n\t * of the struct on the CPU to upload the correct data size.\n\t * \n\t * There's no easy way to find the new size of the struct on the CPU because\n\t * the RayVolumeState struct includes a NestedDielectricsInteriorStack struct whose size\n\t * is defined at compilation time. If the nested dielectrics stack size changes\n\t * at runtime (possible through ImGui), then we need to recompute the size of\n\t * the RayVolumeState structure on the CPU to be able to properly resize the\n\t * GPU buffers that use the RayVolumeState (in the GBuffer for example).\n\t * However, again, that size is determined at compilation time so we can't\n\t * know on the CPU what's going to be the new size. To circumvent that, we\n\t * use the fact that shader are recompiled on the GPU and so the shaders know\n\t * the new size. 
This function thus launches a kernel on the GPU to querry\n\t * the size of the structure.\n\t */\n\t// size_t get_ray_volume_state_byte_size();\n\n\t/**\n\t * Resizes the ray_volume_states array of the GBuffers\n\t * (current frame and previous frames if used) so that it matches the size of RayVolumeState being used on the GPU\n\t */\n\tvoid resize_g_buffer_ray_volume_states();\n\n\tvoid translate_camera_view(glm::vec3 translation);\n\t/**\n\t * Rotates the camera by the given angles (in radians)\n\t */\n\tvoid rotate_camera_view(glm::vec3 rotation_angles);\n\tvoid zoom_camera_view(float offset);\n\n\tRendererAnimationState& get_animation_state();\n\n\toroDeviceProp get_device_properties();\n\tHardwareAccelerationSupport device_supports_hardware_acceleration();\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> get_global_compiler_options();\n\n\tvoid recompile_kernels(bool use_cache = true);\n\n\t/**\n\t * Returns a map of all the kernels of the renderer\n\t * \n\t * The map keys are the kernel name\n\t * The map values are the kernel themselves\n\t */\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> get_all_kernels();\n\t/**\n\t * Returns a map of all the kernels of the renderer that trace rays (shadow rays, bounce rays, ...)\n\t * \n\t * This is used in ImGui in the performance settings panel where we can adjust the\n\t * amount of shared memory used for the BVH traversal. 
Because this is only useful for\n\t * kernels that trace rays, we want a function that returns only the kernels that trace rays\n\t *\n\t * The map keys are the kernel name\n\t * The map values are the kernel themselves\n\t */\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> get_tracing_kernels();\n\n\t//std::vector<std::string> get_all_kernel_ids();\n\t/**\n\t * Sets the debug kernel to be used.\n\t * \n\t * The kernel is expected to be in a file called {kernel_name}.h and the entry point\n\t * function is expected to be {kernel_name}\n\t * \n\t * Calling this function with an empty string as parameter clears the debug kernel\n\t */\n\tvoid set_debug_trace_kernel(const std::string& kernel_name, GPUKernelCompilerOptions options = GPUKernelCompilerOptions());\n\tbool is_using_debug_kernel();\n\toroStream_t get_main_stream();\n\n\tstd::unordered_map<std::string, float>& get_render_pass_times();\n\t/**\n\t * Returns the time taken to compute the last frame in milliseconds\n\t */\n\tfloat get_last_frame_time();\n\n\tvoid update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics);\n\n\tvoid reset(bool reset_by_camera_movement);\n\n\tXorshift32Generator& get_rng_generator();\n\n\tint2 m_render_resolution = make_int2(0, 0);\n\n\tCamera m_camera;\n\tCamera m_previous_frame_camera;\n\t// Animator of the camera of the current frame ('m_camera')\n\tCameraAnimation m_camera_animation;\n\n\tOrochiBuffer<unsigned long long int> m_DEBUG_SUM_COUNT;\n\tOrochiBuffer<unsigned long long int> m_DEBUG_SUMS;\n\nprivate:\n\t// So that GPURendererThread can access the private members of GPURenderer without\n\t// having to write a thousand getters that would expose many members to everyone else\n\tfriend class GPURendererThread;\n\n\tvoid set_hiprt_scene_from_scene(const Scene& scene);\n\tvoid update_render_data();\n\n\t/**\n\t * Private function that does the actual alias table recomputation\n\t */\n\tvoid compute_emissives_power_alias_table(\n\t\tconst 
std::vector<int>& emissive_triangle_indices,\n\t\tconst std::vector<float3>& vertices_positions,\n\t\tconst std::vector<int>& triangles_indices,\n\t\tconst std::vector<int>& material_indices,\n\t\tconst std::vector<CPUMaterial>& materials,\n\n\t\tOrochiBuffer<float>& alias_table_probas_buffer,\n\t\tOrochiBuffer<int>& alias_table_alias_buffer,\n\t\tDeviceAliasTable& power_alias_table);\n\n\t/**\n\t * Returns true if one of the kernels requires the global stack buffer for BVH traversal\n\t */\n\tbool needs_global_bvh_stack_buffer();\n\t/**\n\t * Frees and recreates the global stack buffer for the BVH traversal based on the\n\t * current resolution of the renderer\n\t */\n\tvoid recreate_global_bvh_stack_buffer();\n\n\t/**\n\t * Reads the execution time of the kernels and stores those execution times in 'm_render_pass_times'\n\t */\n\tvoid compute_render_pass_times();\n\n\t// ---- Functions called by the pre_render_update() method ----\n\t//\n\n\t//\n\t// -------- Functions called by the pre_render_update() method ---------\n\n\tvoid internal_clear_m_status_buffers();\n\n\tGPURendererThread m_render_thread;\n\n\t// Some render passes want the application settings of the render window so it's there\n\tstd::shared_ptr<ApplicationSettings> m_application_settings;\n\n\t// Properties of the device\n\toroDeviceProp m_device_properties = { .gcnArchName = \"\" };\n\n\t// GPU events to time the frame\n\t/*oroEvent_t m_frame_start_event = nullptr;\n\toroEvent_t m_frame_stop_event = nullptr;*/\n\t// If true, the last call to render() rendered a frame where render_settings.render_low_resolution was true.\n\t// False otherwise\n\tbool m_was_last_frame_low_resolution = false;\n\t// If true, the buffer pointers of m_render_data will be updated when pre_render_update() is called.\n\t// This boolean is mainly set to true when resizing the renderer since resizing re-creates the \n\t// buffers -> invalidates the pointer -> we need to set them back on render_data\n\t//\n\t// Modifying the 
scene also invalidates the m_render_data buffers. \n\t// Freeing / allocating ReSTIR DI/adaptive sampling buffers (or any buffers that can be allocated / deallocated) too\n\tbool m_render_data_buffers_invalidated = true;\n\t// Whether or not the renderer was updated (with pre_render_update()) since the last render() call.\n\t// This is only used as a security to avoid misusing the renderer class and calling render()\n\t// without having called pre_render_update() before\n\tbool m_updated = false;\n\n\t// Time taken per each pass of the renderer for the last frame.\n\t// \n\t// Some more keys are defined as static const std::string members of this class\n\tstd::unordered_map<std::string, float> m_render_pass_times;\n\n\t// This buffer holds the * sum * of the samples computed\n\t// This is an accumulation buffer. This needs to be divided by the\n\t// number of samples for displaying\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> m_framebuffer;\n\t// AOVs and buffers needed by the denoiser that are filled/manipulated by the renderer.\n\t// This is just a structure to aggregate them all instead of having multiple\n\t// variables in the renderer class\n\tDenoiserBuffersGPUData m_denoiser_buffers;\n\n\t// Used to calculate the variance of each pixel for adaptive sampling\n\tOrochiBuffer<float> m_pixels_squared_luminance_buffer;\n\t// This buffer stores the number of samples accumulated *until* a pixel has converged\n\t// (\"converged\" is according to adaptive sampling or pixel stop noise threshold)\n\tstd::shared_ptr<OrochiBuffer<int>> m_pixels_converged_sample_count_buffer;\n\t// This buffer is necessary because with adaptive sampling, each pixel\n\t// can have accumulated a different number of samples\n\tOrochiBuffer<int> m_pixels_sample_count_buffer;\n\tOrochiBuffer<unsigned char> m_pixel_active;\n\n\tStatusBuffersGPUData m_status_buffers;\n\t// Whether or not the pixel at the given index is active and needs more samples\n\t// Structure that holds the values of 
the one-variable buffers of the renderer.\n\t// These values are 'one_ray_active' or 'pixel_converged_count' for example.\n\t// These values are updated when the pre_render_update() is called\n\tStatusBuffersValues m_status_buffers_values;\n\n\t// Some additional info about the parsed scene such as materials names, mesh names, ...\n\tSceneMetadata m_parsed_scene_metadata;\n\t// The original materials of the scene. Those are the materials that have directly been read from the hard drive scene file.\n\t// Used in case the user wants to revert every changes that have been done\n\tstd::vector<CPUMaterial> m_original_materials;\n\t// Materials currently being used by the GPU. Those are the materials *currently* being\n\t// used for rendering\n\tstd::vector<CPUMaterial> m_current_materials;\n\t// The material names are used for displaying in the ImGui editor\n\t// AABB of the meshes of the scene\n\tstd::vector<BoundingBox> m_mesh_bounding_boxes;\n\n\t// Options used for compiling the render passes of this renderer.\n\t// \n\t// Most of the options in there are shared with all the passes. 
For example,\n\t// the \"__USE_HWI__\" macro that dictates whether to use hardware acceleration\n\t// ray tracing is shared between all kernels (because there's no real reasons for \n\t// one kernel not to use it if all other kernels use it).\n\t// The value 1 or 0 of this macro is stored in this 'm_global_compiler_options' member\n\t// and is 'synchronized' through the use of pointers with the options of the other kernels.\n\t// See 'setup_render_passes' for more details on how that \"synchronization\" is setup\n\tstd::shared_ptr<GPUKernelCompilerOptions> m_global_compiler_options;\n\n\t// Additional functions called on hits when tracing rays (alpha testing for example)\n\tstd::vector<hiprtFuncNameSet> m_func_name_sets;\n\n\t// HIPRT and Orochi contexts\n\tstd::shared_ptr<HIPRTOrochiCtx> m_hiprt_orochi_ctx = nullptr;\n\n\t// Custom stream onto which kernels are dispatched\n\toroStream_t m_main_stream = nullptr;\n\n\t// Render data passed to the GPU for rendering. Most importantly it contains\n\t// \n\t// The WorldSettings: Settings relative to the scene such as the intensity of the uniform light, the\n\t// environment map used, the rotation of the envmap, ...\n\t// \n\t// The RenderSettings: Settings that alter the way the path tracing kernel behaves such as the number\n\t// of bounces, the number of samples per kernel invocation (samples per frame),\n\t// whether or not the adaptive sampling is enabled, ...\n\tHIPRTRenderData m_render_data;\n\n\t// Structure containing the data specific to a scene:\n\t//\t- hiprtGeom\n\t//\t- hiprtMesh\n\t//\t- materials buffer\n\t//\t- materials indices\n\t// ...\n\t//\n\t// Destroying this structure frees the resources\n\tHIPRTScene m_hiprt_scene;\n\n\t// Random number generator used to fill the render_data.random_seed argument\n\t// in update_render_data().\n\tXorshift32Generator m_rng;\n\n\t// State of the animation of the renderer\n\tRendererAnimationState m_animation_state;\n\n\t// Envmap of the renderer\n\tRendererEnvmap 
m_envmap;\n\n\n\t// 32x32 texture containing the precomputed parameters of the LTC\n\t// fitted to approximate the SSGX sheen volumetric layer.\n\t// See SheenLTCFittedParameters.h\n\tOrochiTexture m_sheen_ltc_params;\n\n\t// Precomputed tables for GGX energy compensation\n\t// [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\n\tOrochiTexture m_GGX_conductor_directional_albedo;\n\tOrochiTexture3D m_glossy_dielectric_directional_albedo;\n\tOrochiTexture3D m_GGX_glass_directional_albedo;\n\tOrochiTexture3D m_GGX_glass_inverse_directional_albedo;\n\tOrochiTexture3D m_GGX_thin_glass_directional_albedo;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/GPURendererThread.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/GPURendererThread.h\"\n\n#include \"Threads/ThreadManager.h\"\n#include \"UI/RenderWindow.h\"\n\nvoid GPURendererThread::init(GPURenderer* renderer)\n{\n\t// Configuring the render passes\n\tm_renderer = renderer;\n\tm_render_graph = RenderGraph(renderer);\n}\n\nvoid GPURendererThread::start()\n{\n\tm_render_std_thread = std::thread(&GPURendererThread::render_thread_function, this);\n\tm_render_std_thread.detach();\n}\n\nvoid GPURendererThread::render_thread_function()\n{\n\tOROCHI_CHECK_ERROR(oroCtxSetCurrent(m_renderer->m_hiprt_orochi_ctx->orochi_ctx));\n\n\twhile (true)\n\t{\n\t\t// Wait for the signal to start rendering\n\t\tstd::unique_lock<std::mutex> lock(m_render_mutex);\n\t\tm_render_condition_variable.wait(lock, [this] { return m_frame_requested || m_exit_requested; });\n\t\tif (m_exit_requested)\n\t\t\treturn;\n\n\t\t// Reset the render requested flag\n\t\tm_frame_requested = false;\n\n\t\t// Perform rendering operations here\n\t\trender();\n\t\tpost_frame_update();\n\t}\n}\n\nvoid GPURendererThread::setup_render_passes(RenderWindow* render_window)\n{\n\tstd::shared_ptr<FillGBufferRenderPass> camera_rays_render_pass = std::make_shared<FillGBufferRenderPass>(m_renderer);\n\n\tstd::shared_ptr<NEEPlusPlusRenderPass> nee_plus_plus_render_pass = std::make_shared<NEEPlusPlusRenderPass>(m_renderer);\n\n\tstd::shared_ptr<ReGIRRenderPass> regir_render_pass = std::make_shared<ReGIRRenderPass>(m_renderer);\n\tregir_render_pass->add_dependency(camera_rays_render_pass);\n\tregir_render_pass->add_dependency(nee_plus_plus_render_pass);\n\n\tstd::shared_ptr<ReSTIRDIRenderPass> restir_di_render_pass = 
std::make_shared<ReSTIRDIRenderPass>(m_renderer);\n\trestir_di_render_pass->add_dependency(camera_rays_render_pass);\n\trestir_di_render_pass->add_dependency(regir_render_pass);\n\n\t// Note that the megakernel pass will only be used if ReSTIR GI is not used.\n\t// But we're still adding the render pass to the render graph in case the user\n\t// switches from ReSTIR GI to classical path tracing at runtime\n\tstd::shared_ptr<MegaKernelRenderPass> megakernel_render_pass = std::make_shared<MegaKernelRenderPass>(m_renderer);\n\tmegakernel_render_pass->add_dependency(camera_rays_render_pass);\n\tmegakernel_render_pass->add_dependency(restir_di_render_pass);\n\n\tstd::shared_ptr<ReSTIRGIRenderPass> restir_gi_render_pass = std::make_shared<ReSTIRGIRenderPass>(m_renderer);\n\trestir_gi_render_pass->add_dependency(camera_rays_render_pass);\n\trestir_gi_render_pass->add_dependency(restir_di_render_pass);\n\n\tstd::shared_ptr<GMoNRenderPass> gmon_render_pass = std::make_shared<GMoNRenderPass>(m_renderer);\n\t// GMoN depends on the main path tracing pass which is the megakernel pass or ReSTIR GI, whichever is\n\t// active\n\tgmon_render_pass->add_dependency(megakernel_render_pass);\n\tgmon_render_pass->add_dependency(restir_gi_render_pass);\n\n\tm_render_graph.add_render_pass(camera_rays_render_pass);\n\tm_render_graph.add_render_pass(nee_plus_plus_render_pass);\n\tm_render_graph.add_render_pass(regir_render_pass);\n\tm_render_graph.add_render_pass(restir_di_render_pass);\n\tm_render_graph.add_render_pass(megakernel_render_pass);\n\tm_render_graph.add_render_pass(restir_gi_render_pass);\n\tm_render_graph.add_render_pass(gmon_render_pass);\n\n\tm_render_graph.compile(m_renderer->m_hiprt_orochi_ctx, m_renderer->m_func_name_sets);\n\n\tm_render_graph.set_render_window(render_window);\n}\n\nvoid GPURendererThread::request_frame(HIPRTRenderData& render_data_for_frame, GPUKernelCompilerOptions& compiler_options_for_frame)\n{\n\tstd::lock_guard<std::mutex> 
lock(m_render_mutex);\n\n\tm_render_data_for_frame = render_data_for_frame;\n\tm_compiler_options_for_frame = compiler_options_for_frame.deep_copy();\n\n\tm_currently_rendering = true;\n\tm_frame_rendered = false;\n\tm_frame_requested = true;\n\tm_render_condition_variable.notify_one();\n}\n\nvoid GPURendererThread::request_exit()\n{\n\tstd::lock_guard<std::mutex> lock(m_render_mutex);\n\t\n\tm_exit_requested = true;\n\tm_render_condition_variable.notify_one();\n}\n\nvoid GPURendererThread::wait_on_render_completion()\n{\n\tstd::unique_lock<std::mutex> lock(m_render_completex_mutex);\n\n\tm_render_completed_condition_variable.wait(lock, [this] { return !m_currently_rendering; });\n}\n\nvoid GPURendererThread::pre_render_update(float delta_time, RenderWindow* render_window)\n{\n\tm_renderer->step_animations(delta_time);\n\n\tif (m_render_graph.pre_render_compilation_check(m_renderer->m_hiprt_orochi_ctx, m_renderer->m_func_name_sets, true, true))\n\t\t// Some kernels have been recompiled, renderer is now dirty\n\t\trender_window->set_render_dirty(true);\n\tm_renderer->m_render_data_buffers_invalidated |= m_render_graph.pre_render_update(delta_time);\n\n\tinternal_pre_render_update_clear_device_status_buffers();\n\tinternal_pre_render_update_global_stack_buffer();\n\tinternal_pre_render_update_adaptive_sampling_buffers();\n\n\tm_renderer->update_render_data();\n\n\tm_renderer->m_updated = true;\n}\n\nvoid GPURendererThread::internal_pre_render_update_clear_device_status_buffers()\n{\n\tunsigned char false_data = false;\n\tunsigned int zero_data = 0;\n\t// Uploading false to reset the flag\n\tm_renderer->m_status_buffers.still_one_ray_active_buffer.upload_data(&false_data);\n\t// Resetting the counter of pixels converged to 0\n\tm_renderer->m_status_buffers.pixels_converged_count_buffer.upload_data(&zero_data);\n}\n\nvoid GPURendererThread::internal_pre_render_update_adaptive_sampling_buffers()\n{\n\tbool buffers_needed = 
m_renderer->get_render_data().render_settings.has_access_to_adaptive_sampling_buffers();\n\n\tif (buffers_needed)\n\t{\n\t\tbool pixels_squared_luminance_needs_resize = m_renderer->m_pixels_squared_luminance_buffer.size() == 0;\n\t\tbool pixels_sample_count_needs_resize = m_renderer->m_pixels_sample_count_buffer.size() == 0;\n\t\tbool pixels_converged_sample_count_needs_resize = m_renderer->m_pixels_converged_sample_count_buffer->size() == 0;\n\n\t\tif (pixels_squared_luminance_needs_resize || pixels_sample_count_needs_resize || pixels_converged_sample_count_needs_resize)\n\t\t\t// At least one buffer is going to be resized so buffers are invalidated\n\t\t\tm_renderer->m_render_data_buffers_invalidated = true;\n\n\t\tif (pixels_squared_luminance_needs_resize)\n\t\t\t// Only allocating if it isn't already\n\t\t\tm_renderer->m_pixels_squared_luminance_buffer.resize(m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y);\n\n\t\tif (pixels_sample_count_needs_resize)\n\t\t\t// Only allocating if it isn't already\n\t\t\tm_renderer->m_pixels_sample_count_buffer.resize(m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y);\n\n\t\tif (pixels_converged_sample_count_needs_resize)\n\t\t\tm_renderer->m_pixels_converged_sample_count_buffer->resize(m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y);\n\n\t}\n\telse\n\t{\n\t\tif (m_renderer->m_pixels_squared_luminance_buffer.size() > 0 || m_renderer->m_pixels_sample_count_buffer.size() > 0 || m_renderer->m_pixels_converged_sample_count_buffer->size() > 0)\n\t\t{\n\t\t\tm_renderer->m_pixels_squared_luminance_buffer.free();\n\t\t\tm_renderer->m_pixels_sample_count_buffer.free();\n\t\t\tm_renderer->m_pixels_converged_sample_count_buffer->free();\n\n\t\t\tm_renderer->m_render_data_buffers_invalidated = true;\n\t\t}\n\t}\n}\n\nvoid GPURendererThread::internal_pre_render_update_global_stack_buffer()\n{\n\tif (m_renderer->needs_global_bvh_stack_buffer())\n\t{\n\t\tbool 
buffer_needs_update = false;\n\t\t// Buffer isn't allocated\n\t\tbuffer_needs_update |= m_renderer->get_render_data().global_traversal_stack_buffer.stackData == nullptr;\n\t\t// Buffer is allocated but the stack size has been changed (through ImGui probably)\n\t\tbuffer_needs_update |= m_renderer->get_render_data().global_traversal_stack_buffer_size != m_renderer->get_render_data().global_traversal_stack_buffer.stackSize;\n\n\t\tif (buffer_needs_update)\n\t\t\tm_renderer->recreate_global_bvh_stack_buffer();\n\t}\n\telse\n\t{\n\t\tif (m_renderer->get_render_data().global_traversal_stack_buffer.stackData != nullptr)\n\t\t{\n\t\t\t// Freeing if the buffer already exists\n\t\t\tHIPRT_CHECK_ERROR(hiprtDestroyGlobalStackBuffer(m_renderer->m_hiprt_orochi_ctx->hiprt_ctx, m_renderer->get_render_data().global_traversal_stack_buffer));\n\t\t\tm_renderer->get_render_data().global_traversal_stack_buffer.stackData = nullptr;\n\t\t}\n\t}\n}\n\nvoid GPURendererThread::post_sample_update(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tm_render_graph.post_sample_update_async(render_data, compiler_options);\n\n\trender_data.render_settings.sample_number++;\n\tm_renderer->get_render_data().render_settings.sample_number++;\n\n\trender_data.render_settings.denoiser_AOV_accumulation_counter++;\n\tm_renderer->get_render_data().render_settings.denoiser_AOV_accumulation_counter++;\n\n\t// We only reset once so after rendering a frame, we're sure that we don't need to reset anymore \n\t// so we're setting the flag to false (it will be set to true again if we need to reset the render\n\t// again)\n\trender_data.render_settings.need_to_reset = false;\n\tm_renderer->get_render_data().render_settings.need_to_reset = false;\n\n\trender_data.nee_plus_plus.m_reset_visibility_map = false;\n\tm_renderer->get_render_data().nee_plus_plus.m_reset_visibility_map = false;\n}\n\nvoid GPURendererThread::post_frame_update()\n{\n\t// Saving the current frame camera to be the 
previous camera of the next frame\n\tm_renderer->m_previous_frame_camera = m_renderer->m_camera;\n}\n\nvoid GPURendererThread::render()\n{\n\tif (!m_renderer->m_updated)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"render() was called on the GPURenderer without update() being called.\");\n\t\tUtils::debugbreak();\n\n\t\treturn;\n\t}\n\n\t// Resetting the update state since we're now rendering a new frame\n\tm_renderer->m_updated = false;\n\n\t// Making sure kernels are compiled\n\tThreadManager::join_threads(ThreadManager::COMPILE_KERNELS_THREAD_KEY);\n\n\tif (m_debug_trace_kernel.has_been_compiled())\n\t\trender_debug_kernel();\n\telse\n\t\trender_path_tracing();\n}\n\nvoid GPURendererThread::render_debug_kernel()\n{\n\tm_frame_rendered = false;\n\n\tGPUKernelCompilerOptions compiler_options_copy = m_renderer->m_global_compiler_options->deep_copy();\n\t// Copying the render data here to avoid race concurrency issues with\n\t// the asynchronous ImGui UI which may also modify the render data\n\tHIPRTRenderData render_data_copy = m_renderer->get_render_data();\n\n\t// Updating the previous and current camera\n\trender_data_copy.current_camera = m_renderer->m_camera.to_hiprt(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\trender_data_copy.prev_camera = m_renderer->m_previous_frame_camera.to_hiprt(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\n\tlaunch_debug_kernel(render_data_copy);\n\n\t// Recording GPU frame time stop timestamp and computing the frame time\n\tstruct CallbackPayload\n\t{\n\t\tbool* frame_rendered;\n\t\tbool* currently_rendering;\n\t\tstd::condition_variable* render_completed_condition_variable;\n\t};\n\n\tCallbackPayload* payload = new CallbackPayload;\n\tpayload->currently_rendering = &m_currently_rendering;\n\tpayload->frame_rendered = &m_frame_rendered;\n\tpayload->render_completed_condition_variable = 
&m_render_completed_condition_variable;\n\n\tOROCHI_CHECK_ERROR(oroLaunchHostFunc(m_renderer->get_main_stream(), [](void* payload) \n\t{\n\t\tCallbackPayload* payload_struct = reinterpret_cast<CallbackPayload*>(payload);\n\t\t*payload_struct->frame_rendered = true;\n\t\t*payload_struct->currently_rendering = false;\n\t\tpayload_struct->render_completed_condition_variable->notify_all();\n\n\t\tdelete payload_struct;\n\t}, payload));\n\n\tpost_sample_update(render_data_copy, compiler_options_copy);\n}\n\nGPUKernel& GPURendererThread::get_debug_trace_kernel()\n{\n\treturn m_debug_trace_kernel;\n}\n\nvoid GPURendererThread::launch_debug_kernel(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data, &m_renderer->m_render_resolution };\n\n\tm_renderer->get_render_data().random_number = m_renderer->m_rng.xorshift32();\n\tm_debug_trace_kernel.launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid GPURendererThread::render_path_tracing()\n{\n\tm_frame_rendered = false;\n\n\t// Updating the previous and current camera\n\tm_render_data_for_frame.current_camera = m_renderer->m_camera.to_hiprt(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\tm_render_data_for_frame.prev_camera = m_renderer->m_previous_frame_camera.to_hiprt(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\n\tfor (int i = 1; i <= m_render_data_for_frame.render_settings.samples_per_frame; i++)\n\t{\n\t\tif (i == m_render_data_for_frame.render_settings.samples_per_frame)\n\t\t\t// Last sample of the frame so we are going to enable the update \n\t\t\t// of the status buffers (number of pixels converged, how many rays still\n\t\t\t// active, ...)\n\t\t\tm_render_data_for_frame.render_settings.do_update_status_buffers = true;\n\n\t\tm_render_graph.launch_async(m_render_data_for_frame, 
m_compiler_options_for_frame);\n\n\t\tpost_sample_update(m_render_data_for_frame, m_compiler_options_for_frame);\n\t}\n\n\t// Recording GPU frame time stop timestamp and computing the frame time\n\tstruct CallbackPayload\n\t{\n\t\tbool* frame_rendered;\n\t\tbool* currently_rendering;\n\t\tstd::condition_variable* render_completed_condition_variable;\n\t};\n\n\tCallbackPayload* payload = new CallbackPayload;\n\tpayload->currently_rendering = &m_currently_rendering;\n\tpayload->frame_rendered = &m_frame_rendered;\n\tpayload->render_completed_condition_variable = &m_render_completed_condition_variable;\n\n\tOROCHI_CHECK_ERROR(oroLaunchHostFunc(m_renderer->get_main_stream(), [](void* payload) \n\t{\n\t\tCallbackPayload* payload_struct = reinterpret_cast<CallbackPayload*>(payload);\n\t\t*payload_struct->frame_rendered = true;\n\t\t*payload_struct->currently_rendering = false;\n\t\tpayload_struct->render_completed_condition_variable->notify_all();\n\n\t\tdelete payload_struct;\n\t}, payload));\n\n\tm_renderer->m_was_last_frame_low_resolution = m_renderer->get_render_data().render_settings.do_render_low_resolution();\n\t// We just rendered a new frame so we're setting this flag to true\n\t// such that the animated components of the scene are not allowed to step\n\t// their animations until the render window signals the renderer the the\n\t// frame has been fully rendered and thus that the animations can step forward\n\tm_renderer->m_animation_state.can_step_animation = false;\n}\n\nRenderGraph& GPURendererThread::get_render_graph()\n{\n\treturn m_render_graph;\n}\n\nstd::shared_ptr<GMoNRenderPass> GPURendererThread::get_gmon_render_pass()\n{\n\treturn std::dynamic_pointer_cast<GMoNRenderPass>(m_render_graph.get_render_pass(GMoNRenderPass::GMON_RENDER_PASS_NAME));\n}\n\nstd::shared_ptr<ReGIRRenderPass> GPURendererThread::get_ReGIR_render_pass()\n{\n\treturn 
std::dynamic_pointer_cast<ReGIRRenderPass>(m_render_graph.get_render_pass(ReGIRRenderPass::REGIR_RENDER_PASS_NAME));\n}\n\nstd::shared_ptr<ReSTIRDIRenderPass> GPURendererThread::get_ReSTIR_DI_render_pass()\n{\n\treturn std::dynamic_pointer_cast<ReSTIRDIRenderPass>(m_render_graph.get_render_pass(ReSTIRDIRenderPass::RESTIR_DI_RENDER_PASS_NAME));\n}\n\nstd::shared_ptr<ReSTIRGIRenderPass> GPURendererThread::get_ReSTIR_GI_render_pass()\n{\n\treturn std::dynamic_pointer_cast<ReSTIRGIRenderPass>(m_render_graph.get_render_pass(ReSTIRGIRenderPass::RESTIR_GI_RENDER_PASS_NAME));\n}\n\nstd::shared_ptr<NEEPlusPlusRenderPass> GPURendererThread::get_NEE_plus_plus_render_pass()\n{\n\treturn std::dynamic_pointer_cast<NEEPlusPlusRenderPass>(m_render_graph.get_render_pass(NEEPlusPlusRenderPass::NEE_PLUS_PLUS_RENDER_PASS_NAME));\n}\n\nbool GPURendererThread::frame_render_done()\n{\n\treturn m_frame_rendered;\n}\n"
  },
  {
    "path": "src/Renderer/GPURendererThread.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef GPU_RENDERER_THREAD_H\n#define GPU_RENDERER_THREAD_H\n\n#include \"Renderer/RenderPasses/RenderGraph.h\"\n#include \"RenderPasses/FillGBufferRenderPass.h\"\n#include \"RenderPasses/GMoNRenderPass.h\"\n#include \"RenderPasses/NEEPlusPlusRenderPass.h\"\n#include \"RenderPasses/ReGIRRenderPass.h\"\n#include \"RenderPasses/ReSTIRDIRenderPass.h\"\n#include \"RenderPasses/ReSTIRGIRenderPass.h\"\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include <condition_variable>\n#include <memory>\n\nclass GPURenderer;\nclass RenderWindow;\n\nclass GPURendererThread\n{\npublic:\n\tGPURendererThread() {}\n\n\tvoid init(GPURenderer* renderer);\n\n\tvoid start();\n\tvoid render_thread_function();\n\n\t/**\n\t * Initializes and compiles the kernels\n\t */\n\tvoid setup_render_passes(RenderWindow* render_window);\n\n\tvoid request_frame(HIPRTRenderData& render_data_for_frame, GPUKernelCompilerOptions& compiler_options_for_frame);\n\tvoid request_exit();\n\t/**\n\t * If a frame is currently being computed, this function blocks until the frame has been rendered\n\t */\n\tvoid wait_on_render_completion();\n\n\t/**\n\t * This function is in charge of updating various \"dynamic attributes/properties/buffers\" of the renderer before rendering a frame.\n\t *\n\t * These \"dynamic attributes/properties/buffers\" can be the adaptive sampling buffers for example.\n\t *\n\t * It will be checked each whether or not the adaptive sampling buffers need to be\n\t * allocated or freed and action will be taken accordingly. This function basically enables a\n\t * nice behavior of the application in which the renderer \"automatically\" reacts to changes\n\t * that could be made (through the ImGui interface for example) so that it is always in the\n\t * correct state. 
Said othrewise, this function can be seen as a centralized place for updating\n\t * various stuff of the renderer instead of having to scatter these update calls everywhere\n\t * in the code.\n\t *\n\t * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n\t * call to pre_render_update()\n\t */\n\tvoid pre_render_update(float delta_time, RenderWindow* render_window);\n\n\t/**\n\t * This function increments some counters (such as the number of samples rendered so far) after a\n\t * sample has been rendered\n\t */\n\tvoid post_sample_update(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options);\n\n\t/**\n\t * Updates stuff after a whole frame has been rendered\n\t */\n\tvoid post_frame_update();\n\n\t/**\n\t * Renders a frame asynchronously.\n\t * Querry frame_render_done() to know whether or not the frame has completed or not.\n\t */\n\tvoid render();\n\n\t/**\n\t * This just renders a frame by calling all the path tracing kernels.\n\t * Nothing special.\n\t *\n\t * This function is basically the \"opposite\" of 'render_debug_kernel'\n\t */\n\tvoid render_path_tracing();\n\t/**\n\t * This function launches the 'm_debug_trace_kernel' and saves its execution time\n\t * in 'm_render_pass_times[GPURenderer::DEBUG_KERNEL_TIME_KEY]'\n\t */\n\tvoid render_debug_kernel();\n\tGPUKernel& get_debug_trace_kernel();\n\n\tRenderGraph& get_render_graph();\n\tstd::shared_ptr<GMoNRenderPass> get_gmon_render_pass();\n\tstd::shared_ptr<NEEPlusPlusRenderPass> get_NEE_plus_plus_render_pass();\n\tstd::shared_ptr<ReGIRRenderPass> get_ReGIR_render_pass();\n\tstd::shared_ptr<ReSTIRDIRenderPass> get_ReSTIR_DI_render_pass();\n\tstd::shared_ptr<ReSTIRGIRenderPass> get_ReSTIR_GI_render_pass();\n\n\t/**\n\t * Returns false if the frame queued asynchronously by a previous call to render()\n\t * isn't finished yet.\n\t * Returns true if the frame is completed\n\t */\n\tbool frame_render_done();\n\nprivate:\n\t/**\n\t * Resets the value of 
the status buffers on the device\n\t */\n\tvoid internal_pre_render_update_clear_device_status_buffers();\n\n\t/**\n\t * This function evaluates whether the renderer needs the adaptive\n\t * sampling buffers or not. If the buffers are needed (because the\n\t * adaptive sampling or the stop noise pixel threshold is enabled for example),\n\t * then the buffer will be allocated so that they can be used by the shader.\n\t * If they are not needed, they will be freed to save some VRAM.\n\t */\n\tvoid internal_pre_render_update_adaptive_sampling_buffers();\n\n\t/**\n\t * Allocates/frees the global buffer for BVH traversal when UseSharedStackBVHTraversal is TRUE\n\t */\n\tvoid internal_pre_render_update_global_stack_buffer();\n\n\tvoid launch_debug_kernel(HIPRTRenderData& render_data);\t\n\n\tGPURenderer* m_renderer = nullptr;\n\t// This is the render data structure that is going to be used for all\n\t// render pass dispatches to avoid concurrency races with the ImGui UI\n\t//\n\t// The render data structure of the renderer (which is the structure that the UI modifies)\n\t// is copied into this structure when a frame is requested and this copied structure is then\n\t// used for all render pass dispatches.\n\t//\n\t// Doing the copy and only using the copy ensures that the structure isn't modified by the UI\n\t// while some render passes are doing their rendering (which could happen if render passes were\n\t// reading directly from the render data structure of the renderer which the UI modifies)\n\tHIPRTRenderData m_render_data_for_frame;\n\t// Same for the compiler options\n\tGPUKernelCompilerOptions m_compiler_options_for_frame;\n\tRenderGraph m_render_graph;\n\n\n\t// Whether or not the frame queued on the GPU by the last call to render() \n\t// is done rendering or not\n\tbool m_frame_rendered = true;\n\n\t// If this kernel isn't empty, then it will be used instead of all the regular path tracing\n\t// kernels.\n\t// \n\t// This can be useful for debugging performance 
for example: write a very simple trace kernel\n\t// that just trace camera rays and set this kernel as the debug kernel and you'll be able to\n\t// see the raw ray tracing performance without any scuff\n\tGPUKernel m_debug_trace_kernel;\n\n\tstd::thread m_render_std_thread;\n\n\tstd::condition_variable m_render_condition_variable;\n\tstd::mutex m_render_mutex;\n\n\tstd::condition_variable m_render_completed_condition_variable;\n\tstd::mutex m_render_completex_mutex;\n\n\tbool m_currently_rendering = false;\n\tbool m_frame_requested = false;\n\tbool m_exit_requested = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/HardwareAccelerationSupport.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef HARDWARE_ACCELERATION_SUPPORT_H\n#define HARDWARE_ACCELERATION_SUPPORT_H\n\nenum HardwareAccelerationSupport\n{\n\tSUPPORTED,\n\tAMD_UNSUPPORTED,\n\tNVIDIA_UNSUPPORTED\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/OpenImageDenoiser.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/OpenImageDenoiser.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n\n#include <iostream>\n\nOpenImageDenoiser::OpenImageDenoiser()\n{\n    m_device = nullptr;\n    m_denoised_buffer = nullptr;\n}\n\nvoid OpenImageDenoiser::set_use_normals(bool use_normals)\n{\n    m_use_normals = use_normals;\n}\n\nvoid OpenImageDenoiser::set_denoise_normals(bool denoise_normals_or_not)\n{\n    m_denoise_normals  = denoise_normals_or_not;\n}\n\nvoid OpenImageDenoiser::set_use_albedo(bool use_albedo)\n{\n    m_use_albedo = use_albedo;\n}\n\nvoid OpenImageDenoiser::set_denoise_albedo(bool denoise_albedo_or_not)\n{\n    m_denoise_albedo = denoise_albedo_or_not;\n}\n\nvoid OpenImageDenoiser::resize(int new_width, int new_height)\n{\n    return;\n    if (!check_valid_state())\n        return;\n\n    m_width = new_width;\n    m_height = new_height;\n\n    m_denoised_buffer = m_device.newBuffer(sizeof(ColorRGB32F) * new_width * new_height, oidn::Storage::Managed);\n    m_input_color_buffer_oidn = m_device.newBuffer(sizeof(ColorRGB32F) * new_width * new_height, oidn::Storage::Managed);\n}\n\nvoid OpenImageDenoiser::initialize()\n{\n    create_device();\n}\n\nvoid OpenImageDenoiser::finalize()\n{\n    return;\n    if (!check_valid_state())\n        return;\n\n    m_beauty_filter = m_device.newFilter(\"RT\");\n    m_beauty_filter.setImage(\"color\", m_input_color_buffer_oidn, oidn::Format::Float3, m_width, m_height);\n    m_beauty_filter.setImage(\"output\", m_denoised_buffer, oidn::Format::Float3, m_width, m_height);\n    m_beauty_filter.set(\"cleanAux\", m_denoise_albedo && m_denoise_normals);\n    m_beauty_filter.set(\"hdr\", true);\n\n    if (m_use_normals)\n    {\n        // Creating the buffers here instead of in resize() because we want the creation/destruction \n        // to be dynamic in response to ImGui input so we 
cannot just wait for a window queue_resize event\n        // that would trigger OpenImageDenoiser::queue_resize()\n        m_normals_buffer_denoised_oidn = m_device.newBuffer(sizeof(float3) * m_width * m_height);\n        \n        m_beauty_filter.setImage(\"normal\", m_normals_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n    }\n    else\n        // Destroying the filter & buffer to free memory\n        m_normals_buffer_denoised_oidn = nullptr;\n\n    if (m_denoise_normals && m_use_normals)\n    {\n        m_normals_filter = m_device.newFilter(\"RT\");\n        m_normals_filter.setImage(\"normal\", m_normals_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n        m_normals_filter.setImage(\"output\", m_normals_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n        m_normals_filter.commit();\n    }\n    else\n        m_normals_filter = nullptr;\n\n    if (m_use_albedo)\n    {\n        // Creating the buffers here instead of in resize() because we want the creation/destruction \n        // to be dynamic in response to ImGui input so we cannot just wait for a window queue_resize event\n        // that would trigger OpenImageDenoiser::queue_resize()\n        m_albedo_buffer_denoised_oidn = m_device.newBuffer(sizeof(ColorRGB32F) * m_width * m_height, oidn::Storage::Managed);\n\n        m_beauty_filter.setImage(\"albedo\", m_albedo_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n    }\n    else\n        // Destroying the filter & buffer to free memory\n        m_albedo_buffer_denoised_oidn = nullptr;\n\n    if (m_denoise_albedo && m_use_albedo)\n    {\n        m_albedo_filter = m_device.newFilter(\"RT\");\n        m_albedo_filter.setImage(\"albedo\", m_albedo_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n        m_albedo_filter.setImage(\"output\", m_albedo_buffer_denoised_oidn, oidn::Format::Float3, m_width, m_height);\n        m_albedo_filter.commit();\n    }\n    else\n        
m_albedo_filter = nullptr;\n\n    m_beauty_filter.commit();\n}\n\nvoid OpenImageDenoiser::create_device()\n{\n    // Create an Open ImageRGB32F Denoise device on the GPU depending\n    // on whether we're running on an NVIDIA or AMD GPU\n    //\n    // -1 and nullptr correspond respectively to the default CUDA/HIP device\n    // and the default stream\n#ifdef OROCHI_ENABLE_CUEW\n    m_device = oidn::newDevice(oidn::DeviceType::CUDA);\n#else\n    m_device = oidn::newDevice(oidn::DeviceType::HIP);\n#endif\n\n    if (m_device.getError() == oidn::Error::UnsupportedHardware)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Could not create an OIDN GPU device. Falling back to CPU...\");\n\n        m_device = oidn::newDevice(oidn::DeviceType::CPU);\n\n        const char* errorMessage;\n        if (m_device.getError(errorMessage) != oidn::Error::None)\n        {\n            g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"There was an error getting a CPU device for denoising with OIDN. Denoiser will be unavailable. 
%s\", errorMessage);\n\n            m_denoiser_invalid = true;\n            return;\n        }\n        else\n            // Valid creation of a CPU device\n            m_cpu_device = true;\n    }\n\n    bool managedMemory = m_device.get<bool>(\"managedMemorySupported\");\n\n    m_device.commit();\n}\n\nbool OpenImageDenoiser::check_valid_state()\n{\n    if (m_denoiser_invalid)\n        // Returning false without error message, the error was already printed when we failed at creating the device\n        return false;\n    else if (!check_device())\n        // check_device prints the error\n        return false;\n\n    return true;\n}\n\nbool OpenImageDenoiser::check_device()\n{\n    if (m_device.getHandle() == nullptr)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"OIDN denoiser's device isn't initialized...\");\n\n        return false;\n    }\n\n    return true;\n}\n\nbool OpenImageDenoiser::check_buffer_sizes()\n{\n    size_t normals_buffer_size = m_normals_buffer_denoised_oidn.getSize() / sizeof(float3);\n    size_t albedo_buffer_size = m_albedo_buffer_denoised_oidn.getSize() / sizeof(ColorRGB32F);\n    size_t denoised_buffer_size = m_denoised_buffer.getSize() / sizeof(ColorRGB32F);\n    size_t noisy_input_buffer_size = m_input_color_buffer_oidn.getSize() / sizeof(ColorRGB32F);\n\n    if (m_use_normals && normals_buffer_size != m_width * m_height)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"The denoiser normals buffer isn't the same size as the denoiser. Did you forget to call finalize() after a call to resize()?\");\n\n        return false;\n    }\n    else if (m_use_albedo && albedo_buffer_size != m_width * m_height)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"The denoiser albedo buffer isn't the same size as the denoiser. 
Did you forget to call finalize() after a call to resize()?\");\n\n        return false;\n    }\n    else if (denoised_buffer_size != m_width * m_height || noisy_input_buffer_size != m_width * m_height)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"The denoiser output buffer or input noisy buffer isn't the same size as the denoiser. This has to be an internal error since resize() resizes these two buffers.\");\n\n        return false;\n    }\n\n    return true;\n}\n\nvoid OpenImageDenoiser::denoise(ColorRGB32F* data_to_denoise_device_pointer, float3* normals_aov_device_pointer, ColorRGB32F* albedo_aov_device_pointer)\n{\n    if (!check_valid_state())\n        return;\n\n    if (!check_buffer_sizes())\n        return;\n\n    oroMemcpyKind memcpyKind = m_cpu_device ? oroMemcpyDeviceToHost : oroMemcpyDeviceToDevice;\n\n    if (normals_aov_device_pointer != nullptr)\n    {\n        OROCHI_CHECK_ERROR(oroMemcpy(m_normals_buffer_denoised_oidn.getData(), normals_aov_device_pointer, sizeof(float3) * m_width * m_height, memcpyKind));\n\n        if (m_denoise_normals)\n            m_normals_filter.execute();\n    }\n\n    if (albedo_aov_device_pointer != nullptr)\n    {\n        OROCHI_CHECK_ERROR(oroMemcpy(m_albedo_buffer_denoised_oidn.getData(), albedo_aov_device_pointer, sizeof(ColorRGB32F) * m_width * m_height, memcpyKind));\n\n        if (m_denoise_albedo)\n            m_albedo_filter.execute();\n    }\n\n    OROCHI_CHECK_ERROR(oroMemcpy(m_input_color_buffer_oidn.getData(), data_to_denoise_device_pointer, sizeof(ColorRGB32F) * m_width * m_height, memcpyKind));\n    m_beauty_filter.execute();\n}\n\nvoid OpenImageDenoiser::denoise(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> data_to_denoise, std::shared_ptr<OpenGLInteropBuffer<float3>> normals_aov, std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> albedo_aov)\n{\n    if (!check_valid_state())\n        return;\n\n    if (!check_buffer_sizes())\n        return;\n\n    oroMemcpyKind 
memcpyKind = m_cpu_device ? oroMemcpyDeviceToHost : oroMemcpyDeviceToDevice;\n\n    float3* normals_pointer = nullptr;\n    if (normals_aov != nullptr)\n        normals_pointer = normals_aov->map();\n\n    ColorRGB32F* albedo_pointer = nullptr;\n    if (albedo_aov != nullptr)\n        albedo_pointer = albedo_aov->map();\n\n    ColorRGB32F* data_to_denoise_pointer = data_to_denoise->map();\n\n    denoise(data_to_denoise_pointer, normals_pointer, albedo_pointer);\n    normals_aov->unmap();\n    albedo_aov->unmap();\n    data_to_denoise->unmap();\n}\n\nvoid OpenImageDenoiser::denoise(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> data_to_denoise, std::shared_ptr<OrochiBuffer<float3>> normals_aov, std::shared_ptr<OrochiBuffer<ColorRGB32F>> albedo_aov)\n{\n    if (!check_valid_state())\n        return;\n\n    if (!check_buffer_sizes())\n        return;\n\n    oroMemcpyKind memcpyKind = m_cpu_device ? oroMemcpyDeviceToHost : oroMemcpyDeviceToDevice;\n\n    float3* normals_pointer = nullptr;\n    if (normals_aov != nullptr)\n        normals_pointer = normals_aov->get_device_pointer();\n\n    ColorRGB32F* albedo_pointer = nullptr;\n    if (albedo_aov != nullptr)\n        albedo_pointer = albedo_aov->get_device_pointer();\n\n    ColorRGB32F* data_to_denoise_pointer = data_to_denoise->map();\n\n    denoise(data_to_denoise_pointer, normals_pointer, albedo_pointer);\n\n    data_to_denoise->unmap();\n}\n\nvoid OpenImageDenoiser::copy_denoised_data_to_buffer(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> out_buffer)\n{\n    oroMemcpyKind memcpyKind;\n    ColorRGB32F* buffer_pointer;\n\n    memcpyKind = m_cpu_device ? 
oroMemcpyHostToDevice : oroMemcpyDeviceToDevice;\n    buffer_pointer = out_buffer->map();\n    OROCHI_CHECK_ERROR(oroMemcpy(buffer_pointer, m_denoised_buffer.getData(), sizeof(ColorRGB32F) * m_width * m_height, memcpyKind));\n    out_buffer->unmap();\n}\n\nvoid OpenImageDenoiser::copy_denoised_data_to_buffer(std::shared_ptr<OrochiBuffer<ColorRGB32F>> out_buffer)\n{\n    oroMemcpyKind memcpyKind = m_cpu_device ? oroMemcpyHostToDevice : oroMemcpyDeviceToDevice;\n    ColorRGB32F* buffer_pointer = out_buffer->get_device_pointer();\n\n    OROCHI_CHECK_ERROR(oroMemcpy(buffer_pointer, m_denoised_buffer.getData(), sizeof(ColorRGB32F) * m_width * m_height, memcpyKind));\n}\n"
  },
  {
    "path": "src/Renderer/OpenImageDenoiser.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef OPEN_IMAGE_DENOISER\n#define OPEN_IMAGE_DENOISER\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/Color.h\"\n#include \"OpenGL/OpenGLInteropBuffer.h\"\n\n#include <OpenImageDenoise/oidn.hpp>\n#include <vector>\n\nclass OpenImageDenoiser\n{\npublic:\n\tOpenImageDenoiser();\n\n\tvoid set_use_albedo(bool use_albedo);\n\tvoid set_denoise_albedo(bool denoise_normals_or_not);\n\tvoid set_use_normals(bool use_normal);\n\tvoid set_denoise_normals(bool denoise_normals_or_not);\n\n\tvoid initialize();\n\n\t/**\n\t * Resizes the buffers of this denoiser. Don't forget to call finalize() after calling resize()!\n\t */\n\tvoid resize(int new_width, int new_height);\n\n\t/**\n\t * Function that finalizes the creation of the internal denoising\n\t * filters etc... once everything is setup (set_use_albedo / set_use_normals\n\t * have been called if necessary, subsequent buffers have been provided, ...)\n\t*/\n\tvoid finalize();\n\n\t/**\n\t * Denoises 'data_to_denoise' and uses the AOVs to improve denoising quality if provided\n\t * and if normals/albedo denoising is enabled on the denoiser.\n\t * \n\t * See set_use_albedo(bool use_albedo), set_denoise_albedo(bool denoise_normals_or_not), set_use_normals(bool use_normal), ...\n\t */\n\tvoid denoise(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> data_to_denoise, \n\t\t\t\t std::shared_ptr<OpenGLInteropBuffer<float3>> normals_aov = nullptr, \n\t\t\t\t std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> albedo_aov = nullptr);\n\t/**\n\t * Overload to denoise from non OpenGL Interop AOV buffers\n\t */\n\tvoid denoise(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> data_to_denoise,\n\t\t\t\t std::shared_ptr<OrochiBuffer<float3>> normals_aov,\n\t\t\t\t std::shared_ptr<OrochiBuffer<ColorRGB32F>> albedo_aov);\n\t/**\n\t * Function used to copy the denoiser 
result after a call to denoise() to a given buffer\n\t */\n\tvoid copy_denoised_data_to_buffer(std::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> out_buffer);\n\t/**\n\t * Overload for non-interop buffers\n\t */\n\tvoid copy_denoised_data_to_buffer(std::shared_ptr<OrochiBuffer<ColorRGB32F>> out_buffer);\n\n\nprivate:\n\tvoid create_device();\n\n\tbool check_valid_state();\n\tbool check_device();\n\tbool check_buffer_sizes();\n\n\t// Internal denoise function that takes raw pointers\n\tvoid denoise(ColorRGB32F* data_to_denoise, float3* normals_aov, ColorRGB32F*);\n\n\tbool m_use_albedo = false;\n\tbool m_denoise_albedo = true;\n\tbool m_use_normals = false;\n\tbool m_denoise_normals = true;\n\n\tint m_width, m_height;\n\n\t// If true, this means that we couldn't get a device to denoise with\n\tbool m_denoiser_invalid = false;\n\t// If true, we're using a CPU device and we're going to have to adapt\n\t// the way we copy the GPU framebuffer to the OIDN buffers i.e. we're\n\t// going to have to use memcpyDeviceToHost instead of memcpyDeviceToDevice\n\tbool m_cpu_device = false;\n\toidn::DeviceRef m_device;\n\n\toidn::FilterRef m_beauty_filter = nullptr;\n\toidn::FilterRef m_albedo_filter = nullptr;\n\toidn::FilterRef m_normals_filter = nullptr;\n\n\toidn::BufferRef m_input_color_buffer_oidn;\n\toidn::BufferRef m_normals_buffer_denoised_oidn = nullptr;\n\toidn::BufferRef m_albedo_buffer_denoised_oidn = nullptr;\n\toidn::BufferRef m_denoised_buffer;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/FillGBufferRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/FillGBufferRenderPass.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n\n#include <memory>\n\nconst std::string FillGBufferRenderPass::FILL_GBUFFER_RENDER_PASS_NAME = \"Fill G-Buffer Render Pass\";\nconst std::string FillGBufferRenderPass::FILL_GBUFFER_KERNEL = \"Fill G-Buffer\";\n\nFillGBufferRenderPass::FillGBufferRenderPass() : FillGBufferRenderPass(nullptr) {}\nFillGBufferRenderPass::FillGBufferRenderPass(GPURenderer* renderer) : RenderPass(renderer, FillGBufferRenderPass::FILL_GBUFFER_RENDER_PASS_NAME)\n{\n\tm_render_resolution = m_renderer->m_render_resolution;\n\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL] = std::make_shared<GPUKernel>();\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/CameraRays.h\");\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->set_kernel_function_name(\"CameraRays\");\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->synchronize_options_with(m_renderer->get_global_compiler_options(), GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n}\n\nvoid FillGBufferRenderPass::compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n\t// Configuring the kernel that will be used to retrieve the size of the RayVolumeState structure.\n\t// This size will be needed to resize the 'ray_volume_states' buffer in the GBuffer if the nested 
dielectrics\n\t// stack size changes\n\t//\n\t// We're compiling it serially so that we're sure that we can retrieve the RayVolumeState size on the GPU after the\n\t// GPURenderer is constructed (because this renderer pass is compiled during the construction of the GPURenderer)\n\n\tm_ray_volume_state_byte_size_kernel = std::make_shared<GPUKernel>();\n\tm_ray_volume_state_byte_size_kernel->set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/Utils/RayVolumeStateSize.h\");\n\tm_ray_volume_state_byte_size_kernel->set_kernel_function_name(\"RayVolumeStateSize\");\n\tm_ray_volume_state_byte_size_kernel->synchronize_options_with(m_renderer->get_global_compiler_options(), GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tThreadManager::start_serial_thread(ThreadManager::COMPILE_RAY_VOLUME_STATE_SIZE_KERNEL_KEY, ThreadFunctions::compile_kernel_silent, m_ray_volume_state_byte_size_kernel, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\tRenderPass::compile(hiprt_orochi_ctx, func_name_sets);\n}\n\nvoid FillGBufferRenderPass::resize(unsigned int new_width, unsigned int new_height)\n{\n\tm_g_buffer.resize(new_width * new_height, get_ray_volume_state_byte_size());\n\n\tif (m_renderer->get_render_data().render_settings.use_prev_frame_g_buffer(m_renderer))\n\t\tm_g_buffer_prev_frame.resize(new_width * new_height, get_ray_volume_state_byte_size());\n\n\tm_render_resolution = m_renderer->m_render_resolution;\n}\n\nbool FillGBufferRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (render_data.render_settings.use_prev_frame_g_buffer(m_renderer))\n\t{\n\t\t// If at least one buffer has a size of 0, we assume that this means that the whole G-buffer is deallocated\n\t\t// and so we're going to have to reallocate it\n\t\tbool prev_frame_g_buffer_needs_resize = m_g_buffer_prev_frame.first_hit_prim_index.size() == 0;\n\n\t\tif 
(prev_frame_g_buffer_needs_resize)\n\t\t{\n\t\t\tm_g_buffer_prev_frame.resize(m_render_resolution.x * m_render_resolution.y, get_ray_volume_state_byte_size());\n\t\t\treturn true;\n\t\t}\n\t}\n\telse\n\t{\n\t\t// If we're not using the G-buffer, indicating that in use_last_frame_g_buffer so that the shader doesn't\n\t\t// try to use it\n\n\t\tif (m_g_buffer_prev_frame.first_hit_prim_index.size() > 0)\n\t\t{\n\t\t\t// If the buffers aren't freed already\n\t\t\tm_g_buffer_prev_frame.free();\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\nbool FillGBufferRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tvoid* launch_args[] = { &render_data };\n\n\tm_kernels[FillGBufferRenderPass::FILL_GBUFFER_KERNEL]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_render_resolution.x, m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\n\treturn true;\n}\n\nvoid FillGBufferRenderPass::update_render_data()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\trender_data.g_buffer = m_g_buffer.get_device_g_buffer();\n\n\tif (render_data.render_settings.use_prev_frame_g_buffer(m_renderer))\n\t\t// Only setting the pointers of the buffers if we're actually using the g-buffer of the previous frame\n\t\trender_data.g_buffer_prev_frame = m_g_buffer_prev_frame.get_device_g_buffer();\n\telse\n\t{\n\t\trender_data.g_buffer_prev_frame.materials = nullptr;\n\t\trender_data.g_buffer_prev_frame.geometric_normals = nullptr;\n\t\trender_data.g_buffer_prev_frame.shading_normals = nullptr;\n\t\trender_data.g_buffer_prev_frame.primary_hit_position = nullptr;\n\t}\n}\n\nsize_t FillGBufferRenderPass::get_ray_volume_state_byte_size()\n{\n\tOrochiBuffer<size_t> out_size_buffer(1);\n\tsize_t* out_size_buffer_pointer = 
out_size_buffer.get_device_pointer();\n\n\tThreadManager::join_threads(ThreadManager::COMPILE_RAY_VOLUME_STATE_SIZE_KERNEL_KEY);\n\n\tvoid* launch_args[] = { &out_size_buffer_pointer };\n\tm_ray_volume_state_byte_size_kernel->launch_synchronous(1, 1, 1, 1, launch_args, 0);\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(0));\n\n\t//std::cout << out_size_buffer.download_data()[0] << \" GPU\";\n\t//std::cout << sizeof(RayVolumeState) << \" CPU\" << std::endl;\n\t//std::exit(0);\n\t//return 0;\n\tsize_t size = out_size_buffer.download_data()[0];\n\n\treturn size;\n}\n\nvoid FillGBufferRenderPass::resize_g_buffer_ray_volume_states()\n{\n\tm_renderer->synchronize_all_kernels();\n\n\tm_g_buffer.ray_volume_states.resize(m_render_resolution.x * m_render_resolution.y, get_ray_volume_state_byte_size());\n\tif (m_renderer->get_render_data().render_settings.use_prev_frame_g_buffer())\n\t\tm_g_buffer_prev_frame.ray_volume_states.resize(m_render_resolution.x * m_render_resolution.y, get_ray_volume_state_byte_size());\n\n\tm_renderer->invalidate_render_data_buffers();\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/FillGBufferRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef CAMERA_RAYS_RENDER_PASS_H\n#define CAMERA_RAYS_RENDER_PASS_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"Renderer/GPUDataStructures/GBufferGPUData.h\"\n#include \"Renderer/RenderPasses/RenderPass.h\"\n\nclass GPURenderer;\n\nclass FillGBufferRenderPass : public RenderPass\n{\npublic:\n\tstatic const std::string FILL_GBUFFER_RENDER_PASS_NAME;\n\tstatic const std::string FILL_GBUFFER_KERNEL;\n\n\tFillGBufferRenderPass();\n\tFillGBufferRenderPass(GPURenderer* renderer);\n\n\tvirtual void compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}) override;\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override;\n\n\tvirtual bool pre_render_update(float delta_time) override;\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override {}\n\n\tvirtual void update_render_data() override;\n\tvirtual void reset(bool reset_by_camera_movement) override {};\n\n\tsize_t get_ray_volume_state_byte_size();\n\tvoid resize_g_buffer_ray_volume_states();\n\nprivate:\n\tint2 m_render_resolution = make_int2(0, 0);\n\n\t// G-buffers of the current frame (camera rays hits) and previous frame\n\tGBufferGPURenderer m_g_buffer;\n\tGBufferGPURenderer m_g_buffer_prev_frame;\n\n\t// Kernel used for retrieving the size of the RayVolumeState structure on the GPU\n\tstd::shared_ptr<GPUKernel> m_ray_volume_state_byte_size_kernel = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/GMoNRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"GMoNRenderPass.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/RenderWindow.h\"\n\nconst std::string GMoNRenderPass::GMON_RENDER_PASS_NAME = \"GMoN Render Pass\";\nconst std::string GMoNRenderPass::COMPUTE_GMON_KERNEL = \"Compute G-MoN\";\n\nGMoNRenderPass::GMoNRenderPass() : GMoNRenderPass(nullptr) {}\n\nGMoNRenderPass::GMoNRenderPass(GPURenderer* renderer) : RenderPass(renderer, GMoNRenderPass::GMON_RENDER_PASS_NAME)\n{\n\tm_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL] = std::make_shared<GPUKernel>();\n\tm_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/GMoN/GMoNComputeMedianOfMeans.h\");\n\tm_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->set_kernel_function_name(\"GMoNComputeMedianOfMeans\");\n\tm_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->synchronize_options_with(renderer->get_global_compiler_options(), {});\n}\n\nbool GMoNRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tint2 render_resolution = render_data.render_settings.render_resolution;\n\n\tif (is_render_pass_used())\n\t{\n\t\tunsigned int number_of_sets = m_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->get_kernel_options().get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT);\n\t\tif (m_gmon.current_resolution.x != render_resolution.x || m_gmon.current_resolution.y != render_resolution.y)\n\t\t{\n\t\t\t// Resizing the buffers because the resolution has changed\n\t\t\tm_gmon.resize_sets(render_resolution.x, render_resolution.y, get_number_of_sets_used());\n\t\t\tm_gmon.resize_interop(render_resolution.x, render_resolution.y);\n\n\t\t\trender_data.buffers.gmon_estimator.next_set_to_accumulate = 0;\n\n\t\t\t// Returning true to indicate that the render data buffers have been 
invalidated\n\t\t\treturn true;\n\t\t}\n\t\telse if (number_of_sets != m_gmon.current_number_of_sets)\n\t\t{\n\t\t\t// If the number of sets changed...\n\n\t\t\tm_gmon.resize_sets(render_resolution.x, render_resolution.y, get_number_of_sets_used());\n\t\t\trender_data.buffers.gmon_estimator.next_set_to_accumulate = 0;\n\n\t\t\treturn true;\n\t\t}\n\n\t\tif (m_gmon.gmon_auto_blend_factor)\n\t\t\t// Auto adjusting the GMoN blend factor\n\t\t\t// \n\t\t\t// Choosing the blending factor based on how many samples we've accumulated so far\n\t\t\t// \n\t\t\t// This is just a linear ramp.\n\t\t\t//\n\t\t\t// 0 blend factor at sample number 0\n\t\t\t// 1 blend factor at sample number (2 * number_of_sets^2)\n\t\t\tif (HIPRTRenderSettings::DEBUG_DEV_GMON_BLEND_WEIGHTS)\n\t\t\t\tm_gmon.gmon_blend_factor = 1.0f;// -m_darkening_factor;\n\t\t\telse\n\t\t\t\tm_gmon.gmon_blend_factor = hippt::clamp(0.0f, 1.0f, render_data.render_settings.sample_number / (2.0f * hippt::square(number_of_sets)));\n\n\t\t// Resetting the flag because we're now rendering a new frame\n\t\tm_gmon.m_gmon_recomputed = false;\n\t}\n\telse\n\t{\n\t\tif (!m_gmon.is_freed())\n\t\t{\n\t\t\tm_gmon.free();\n\n\t\t\t// Returning true to indicate that the render data buffers have been invalidated\n\t\t\treturn true;\n\t\t}\n\t}\n\n\trender_data.buffers.gmon_estimator.next_set_to_accumulate = m_next_set_to_accumulate;\n\n\treturn false;\n}\n\nbool GMoNRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn false;\n\n\tstd::shared_ptr<ApplicationSettings> application_settings = m_renderer->get_application_settings();\n\n\tunsigned int number_of_sets = m_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->get_kernel_options().get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT);\n\n\t// Adding +1 to sample_number here because this launch() function is called after the renderer has accumulated\n\t// one more sample but 
before render_settings.sample_number is incremented\n\t//\n\t// We also want to update the viewport at sample 0 just so that we don't get a black viewport\n\t// (that update at sample 0 isn't going to be a full GMoN computation, it's just going to be\n\t// a copy of the current pixel color (which is only 1 sample accumulated) to the framebuffer)\n\tbool enough_samples_accumulated = (render_data.render_settings.sample_number + 1) % number_of_sets == 0;\n\tbool sample_0 = render_data.render_settings.sample_number == 0;\n\tbool last_sample_of_render = render_data.render_settings.sample_number == (application_settings->max_sample_count - 1);\n\tbool recomputation_necessary = m_gmon.m_gmon_recomputation_requested || last_sample_of_render;\n\tif ((enough_samples_accumulated || sample_0) && recomputation_necessary)\n\t{\n\t\t// If we have rendered enough samples that one more sample has been accumulated in each of the\n\t\t// GMoN sets\n\t\tint2 render_resolution = m_renderer->m_render_resolution;\n\n\t\trender_data.buffers.gmon_estimator.next_set_to_accumulate = m_next_set_to_accumulate;\n\n\t\tvoid* launch_args[] = { &render_data };\n\n\t\tm_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->launch_asynchronous(\n\t\t\tGMoNComputeMeansKernelThreadBlockSize, GMoNComputeMeansKernelThreadBlockSize, render_resolution.x, render_resolution.y,\n\t\t\tlaunch_args,\n\t\t\tm_renderer->get_main_stream());\n\n\t\tm_gmon.m_gmon_recomputed = true;\n\t\tm_gmon.m_gmon_recomputation_requested = false;\n\t\tm_gmon.last_recomputed_sample_count = render_data.render_settings.sample_number + 1;\n\n\t\tm_darkening_factor = compute_gmon_darkening(render_data);\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nfloat GMoNRenderPass::compute_gmon_darkening(HIPRTRenderData& render_data)\n{\n\tif (!render_data.render_settings.DEBUG_gmon_auto_blending_weights || !HIPRTRenderSettings::DEBUG_DEV_GMON_BLEND_WEIGHTS)\n\t\treturn 0.0f;\n\n\tstd::vector<ColorRGB32F> result = 
OrochiBuffer<ColorRGB32F>::download_data(m_gmon.result_framebuffer->map(), m_gmon.result_framebuffer->size());\n\tstd::vector<ColorRGB32F> reference = OrochiBuffer<ColorRGB32F>::download_data(m_renderer->get_default_interop_framebuffer()->map(), m_gmon.result_framebuffer->size());\n\tstd::vector<float> blend_weights_framebuffer(reference.size());\n\n\tint debug_x_1 = 589; //51\n\tint debug_y_1 = 24;\n\tint debug_x_2 = 596;\n\tint debug_y_2 = 34;\n\n\tfor (int y = 0; y < m_renderer->m_render_resolution.y; y++)\n\t{\n\t\tfor (int x = 0; x < m_renderer->m_render_resolution.x; x++)\n\t\t{\n\t\t\tint index = x + y * m_renderer->m_render_resolution.x;\n\n\t\t\tColorRGB32F ref_color = reference[index] / render_data.render_settings.sample_number;\n\t\t\tColorRGB32F result_color = result[index] / render_data.render_settings.sample_number;\n\n\t\t\tref_color = ColorRGB32F(1.0f) - exp(-ref_color * 1.8f);\n\t\t\tref_color = pow(ref_color, 1.0f / 2.2f);\n\n\t\t\tresult_color = ColorRGB32F(1.0f) - exp(-result_color * 1.8f);\n\t\t\tresult_color = pow(result_color, 1.0f / 2.2f);\n\n\t\t\tfloat ref_luminance = ref_color.luminance();\n\t\t\tfloat result_luminance = result_color.luminance();\n\n\t\t\tif (x == debug_x_1 && m_renderer->m_render_resolution.y - 1 - y == debug_y_1)\n\t\t\t\tstd::cout << std::endl;\n\n\t\t\tif (ref_luminance - result_luminance > ref_luminance / 2.0f)\n\t\t\t{\n\t\t\t\t// If the pixel has lost a lot of luminance i.e. 
darkening, determining if this is a firefly or not\n\n\t\t\t\tint window_size = render_data.render_settings.DEBUG_GMON_WINDOW_SIZE;\n\t\t\t\tint valid_neighbors = 0;\n\t\t\t\tfloat neighbor_luminance_sum = 0.0f;\n\t\t\t\tfloat neighbor_luminance_average = 0.0f;\n\n\t\t\t\t// Computing the average of neighbors\n\t\t\t\tfor (int j = -window_size / 2; j <= window_size / 2; j++)\n\t\t\t\t{\n\t\t\t\t\tfor (int i = -window_size / 2; i <= window_size / 2; i++)\n\t\t\t\t\t{\n\t\t\t\t\t\tint neighbor_index_x = x + i;\n\t\t\t\t\t\tint neighbor_index_y = y + j;\n\t\t\t\t\t\tif (neighbor_index_x < 0 || neighbor_index_x >= m_renderer->m_render_resolution.x || neighbor_index_y < 0 || neighbor_index_y >= m_renderer->m_render_resolution.y)\n\t\t\t\t\t\t\tcontinue;\n\t\t\t\t\t\telse if (i == 0 && j == 0)\n\t\t\t\t\t\t\t// Not counting the center pixel\n\t\t\t\t\t\t\tcontinue;\n\n\t\t\t\t\t\tint neighbor_index = neighbor_index_x + neighbor_index_y * m_renderer->m_render_resolution.x;\n\n\t\t\t\t\t\tColorRGB32F current_color = reference[neighbor_index] / render_data.render_settings.sample_number;\n\t\t\t\t\t\tcurrent_color = ColorRGB32F(1.0f) - exp(-current_color * 1.8f);\n\t\t\t\t\t\tcurrent_color = pow(current_color, 1.0f / 2.2f);\n\n\t\t\t\t\t\tvalid_neighbors++;\n\t\t\t\t\t\tneighbor_luminance_sum += current_color.luminance();\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (x == debug_x_1 && m_renderer->m_render_resolution.y - 1 - y == debug_y_1)\n\t\t\t\t\tstd::cout << std::endl;\n\n\t\t\t\tneighbor_luminance_average = neighbor_luminance_sum / valid_neighbors;\n\n\t\t\t\tfloat brighter = ref_luminance / neighbor_luminance_average;\n\n\t\t\t\tblend_weights_framebuffer[index] = hippt::inverse_lerp(brighter, 1.0f, 3.0f);\n\t\t\t}\n\t\t\telse\n\t\t\t\tblend_weights_framebuffer[index] = 1.0f;\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\t\t//int window_size = 5;\n\t\t\t//int valid_neighbors = 0;\n\t\t\t//float neighbor_luminance_sum = 0.0f;\n\t\t\t//float neighbor_luminance_average = 
0.0f;\n\t\t\t//std::vector<float> neighbors_luminance;\n\t\t\t//neighbors_luminance.reserve(window_size * window_size);\n\n\t\t\t//// Computing the average of neighbors\n\t\t\t//for (int i = -window_size / 2; i <= window_size / 2; i++)\n\t\t\t//{\n\t\t\t//\tfor (int j = -window_size / 2; j <= window_size / 2; j++)\n\t\t\t//\t{\n\t\t\t//\t\tint neighbor_index_x = x + j;\n\t\t\t//\t\tint neighbor_index_y = y + i;\n\t\t\t//\t\tif (neighbor_index_x < 0 || neighbor_index_x >= m_renderer->m_render_resolution.x || neighbor_index_y < 0 || neighbor_index_y >= m_renderer->m_render_resolution.y)\n\t\t\t//\t\t\tcontinue;\n\n\t\t\t//\t\tint neighbor_index = neighbor_index_x + neighbor_index_y * m_renderer->m_render_resolution.x;\n\n\t\t\t//\t\tfloat current_luminance = reference[neighbor_index].luminance() / render_data.render_settings.sample_number;\n\t\t\t//\t\tvalid_neighbors++;\n\t\t\t//\t\tneighbor_luminance_sum += current_luminance;\n\t\t\t//\t\tneighbors_luminance.push_back(current_luminance);\n\t\t\t//\t}\n\t\t\t//}\n\t\t\t//neighbor_luminance_average = neighbor_luminance_sum / valid_neighbors;\n\n\t\t\t//// Computing the variance\n\t\t\t//float neighbor_luminance_variance = 0.0f;\n\t\t\t//for (int i = -window_size / 2; i <= window_size / 2; i++)\n\t\t\t//{\n\t\t\t//\tfor (int j = -window_size / 2; j <= window_size / 2; j++)\n\t\t\t//\t{\n\t\t\t//\t\tint neighbor_index_x = x + j;\n\t\t\t//\t\tint neighbor_index_y = y + i;\n\t\t\t//\t\tif (neighbor_index_x < 0 || neighbor_index_x >= m_renderer->m_render_resolution.x || neighbor_index_y < 0 || neighbor_index_y >= m_renderer->m_render_resolution.y)\n\t\t\t//\t\t\tcontinue;\n\n\t\t\t//\t\tint neighbor_index = neighbor_index_x + neighbor_index_y * m_renderer->m_render_resolution.x;\n\n\t\t\t//\t\tfloat current_luminance = reference[neighbor_index].luminance() / render_data.render_settings.sample_number;\n\t\t\t//\t\tneighbor_luminance_variance += hippt::square(current_luminance - neighbor_luminance_average) / 
valid_neighbors;\n\t\t\t//\t}\n\t\t\t//}\n\n\t\t\t//if (x == debug_x_1 && m_renderer->m_render_resolution.y - 1 - y == debug_y_1)\n\t\t\t//\tm_DEBUG_LUMINANCE_VARIANCE1 = neighbor_luminance_variance;\n\t\t\t//if (x == debug_x_2 && m_renderer->m_render_resolution.y - 1 - y == debug_y_2)\n\t\t\t//\tm_DEBUG_LUMINANCE_VARIANCE2 = neighbor_luminance_variance;\n\n\t\t\t//std::sort(neighbors_luminance.begin(), neighbors_luminance.end());\n\n\t\t\t/*if (hippt::abs(x - debug_x_1) <= 5 && hippt::abs(m_renderer->m_render_resolution.y - y - debug_y_1 - 1) <= 5)\n\t\t\t\tresult[index] = ColorRGB32F(1.0e10f, 0.0f, 0.0f);*/\n\n\t\t\t\t//blend_weights_framebuffer[index] = 1.0f;\n\t\t}\n\t}\n\n\tfor (int i = 0; i < result.size(); i++)\n\t\tresult[i] = hippt::lerp(reference[i], result[i], blend_weights_framebuffer[i]);\n\tOrochiBuffer<ColorRGB32F>::upload_data(m_gmon.result_framebuffer->map(), result, m_gmon.result_framebuffer->size());\n\n\treturn 0.0f;// 1.0f - result_luminance_sum / ref_luminance_sum;\n}\n\nfloat GMoNRenderPass::get_gmon_darkening()\n{\n\treturn m_darkening_factor;\n}\n\nfloat GMoNRenderPass::get_lumi()\n{\n\treturn m_DEBUG_LUMINANCE_VARIANCE1;\n}\n\nvoid GMoNRenderPass::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (m_render_pass_used_this_frame)\n\t{\n\t\t// We're going to increment the counter that indicates in which sets of GMoN to accumulate\n\t\tm_next_set_to_accumulate++;\n\t\tif (m_next_set_to_accumulate == m_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->get_kernel_options().get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT))\n\t\t\t// Going back to 0 if we've reached the end of the sets, round robin style\n\t\t\tm_next_set_to_accumulate = 0;\n\t}\n\n\trender_data.buffers.gmon_estimator.next_set_to_accumulate = m_next_set_to_accumulate;\n}\n\nvoid GMoNRenderPass::request_recomputation()\n{\n\tm_gmon.m_gmon_recomputation_requested = true;\n}\n\nbool 
GMoNRenderPass::recomputation_completed()\n{\n\treturn m_gmon.m_gmon_recomputed;\n}\n\nbool GMoNRenderPass::recomputation_requested()\n{\n\treturn m_gmon.m_gmon_recomputation_requested;\n}\n\nunsigned int GMoNRenderPass::get_last_recomputed_sample_count()\n{\n\treturn m_gmon.last_recomputed_sample_count;\n}\n\nvoid GMoNRenderPass::reset(bool reset_by_camera_movement)\n{\n\tif (is_render_pass_used())\n\t{\n\t\tm_next_set_to_accumulate = 0;\n\n\t\tif (buffers_allocated() && !m_render_window->is_interacting())\n\t\t\tm_gmon.sets.memset_whole_buffer(ColorRGB32F(0.0f));\n\n\t\t// Requesting a computation on reset just so that we copy the very\n\t\t// first sample to the framebuffer to avoid having a black viewport\n\t\t// until the next GMoN recomputation\n\t\tm_gmon.m_gmon_recomputation_requested = true;\n\t}\n}\n\nvoid GMoNRenderPass::update_render_data()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (m_gmon.sets.is_allocated())\n\t\trender_data.buffers.gmon_estimator.sets = m_gmon.sets.get_device_pointer();\n\telse\n\t\trender_data.buffers.gmon_estimator.sets = nullptr;\n}\n\nstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> GMoNRenderPass::get_result_framebuffer()\n{\n\treturn m_gmon.result_framebuffer;\n}\n\nunsigned int GMoNRenderPass::get_number_of_sets_used()\n{\n\treturn m_kernels[GMoNRenderPass::COMPUTE_GMON_KERNEL]->get_kernel_options().get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT);\n}\n\nvoid GMoNRenderPass::resize(unsigned int new_width, unsigned int new_height)\n{\n\tif (is_render_pass_used())\n\t{\n\t\tm_gmon.resize_sets(new_width, new_height, get_number_of_sets_used());\n\n\t\tm_gmon.result_framebuffer->resize(new_width * new_height);\n\t}\n}\n\nColorRGB32F* GMoNRenderPass::map_result_framebuffer()\n{\n\tif (is_render_pass_used())\n\t\treturn m_gmon.map_result_framebuffer();\n\n\treturn nullptr;\n}\n\nvoid GMoNRenderPass::unmap_result_framebuffer()\n{\n\tif 
(is_render_pass_used())\n\t\tm_gmon.result_framebuffer->unmap();\n}\n\nbool GMoNRenderPass::buffers_allocated()\n{\n\treturn m_gmon.sets.size() > 0;\n}\n\nbool GMoNRenderPass::is_render_pass_used() const\n{\n\tbool gmon_enabled = m_gmon.using_gmon;\n\tbool accumulation_enabled = m_renderer->get_render_settings().accumulate;\n\n\treturn gmon_enabled && accumulation_enabled;\n}\n\nGMoNGPUData& GMoNRenderPass::get_gmon_data()\n{\n\treturn m_gmon;\n}\n\nunsigned int GMoNRenderPass::get_VRAM_usage_bytes() const\n{\n\tif (!is_render_pass_used())\n\t\treturn 0;\n\n\treturn m_gmon.get_VRAM_usage_bytes();\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> GMoNRenderPass::get_tracing_kernels()\n{\n\treturn {};\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/GMoNRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_GMON_RENDER_PASS_H\n#define RENDERER_GMON_RENDER_PASS_H\n\n#include \"Compiler/GPUKernel.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"Renderer/GPUDataStructures/GMoNGPUData.h\"\n#include \"Renderer/RenderPasses/RenderPass.h\"\n#include \"UI/ApplicationSettings.h\"\n\nclass GPURenderer;\n\nclass GMoNRenderPass : public RenderPass\n{\npublic:\n\tstatic const std::string GMON_RENDER_PASS_NAME;\n\tstatic const std::string COMPUTE_GMON_KERNEL;\n\n\tGMoNRenderPass();\n\tGMoNRenderPass(GPURenderer* renderer);\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override;\n\n\t/**\n\t * Allocates/deallocates the buffers used by GMoN.\n\t * \n\t * Returns true or false depending on whether or not the render buffer data have been invalidated\n\t */\n\tvirtual bool pre_render_update(float delta_time) override;\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tfloat compute_gmon_darkening(HIPRTRenderData& render_data);\n\tfloat get_gmon_darkening();\n\n\tfloat get_lumi();\n\n\t/**\n\t * Does the actual allocation/deallocation of the GMoN buffers.\n\t * \n\t * Returns true a buffer was allocated or deallocated\n\t * Returns false if buffer were left untouched\n\t */\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tvirtual void update_render_data() override;\n\tvirtual void reset(bool reset_by_camera_movement) override;\n\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_tracing_kernels() override;\n\n\tvirtual bool is_render_pass_used() const override;\n\n\tvoid request_recomputation();\n\tbool recomputation_completed();\n\tbool recomputation_requested();\n\n\tunsigned int 
get_last_recomputed_sample_count();\n\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> get_result_framebuffer();\n\tunsigned int get_number_of_sets_used();\n\n\tColorRGB32F* map_result_framebuffer();\n\tvoid unmap_result_framebuffer();\n\t/**\n\t * Returns true or false depending on whether or not the GMoN buffers are allocated\n\t */\n\tbool buffers_allocated();\n\n\tGMoNGPUData& get_gmon_data();\n\tunsigned int get_VRAM_usage_bytes() const;\n\n\tfloat m_DEBUG_LUMINANCE_VARIANCE1 = 0.0f;\n\tfloat m_DEBUG_LUMINANCE_VARIANCE2 = 0.0f;\nprivate:\n\tfloat m_darkening_factor = 0.0f;\n\n\t// Data for the GMoN estimator\n\tGMoNGPUData m_gmon;\n\n\tint m_next_set_to_accumulate = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/MegaKernelRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/MegaKernelRenderPass.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"Threads/ThreadFunctions.h\"\n\nconst std::string MegaKernelRenderPass::MEGAKERNEL_RENDER_PASS_NAME = \"Megakernel Render Pass\";\nconst std::string MegaKernelRenderPass::MEGAKERNEL_KERNEL = \"Megakernel (1 SPP)\";\n\nMegaKernelRenderPass::MegaKernelRenderPass() : MegaKernelRenderPass(nullptr) {}\nMegaKernelRenderPass::MegaKernelRenderPass(GPURenderer* renderer) : MegaKernelRenderPass(renderer, MegaKernelRenderPass::MEGAKERNEL_RENDER_PASS_NAME) {}\nMegaKernelRenderPass::MegaKernelRenderPass(GPURenderer* renderer, const std::string& name) : RenderPass(renderer, name) \n{\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL] = std::make_shared<GPUKernel>();\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->set_kernel_file_path(DEVICE_KERNELS_DIRECTORY \"/Megakernel.h\");\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->set_kernel_function_name(\"MegaKernel\");\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->synchronize_options_with(m_renderer->get_global_compiler_options(), GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n}\n\nvoid MegaKernelRenderPass::resize(unsigned int new_width, unsigned int new_height)\n{\n\tm_render_resolution.x = new_width;\n\tm_render_resolution.y = new_height;\n}\n\nbool MegaKernelRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif 
(!is_render_pass_used())\n\t\treturn false;\n\n\t// Resetting this flag as this is a new frame\n\trender_data.render_settings.do_update_status_buffers = false;\n\n\tif (!render_data.render_settings.accumulate)\n\t\trender_data.render_settings.sample_number = 0;\n\n\treturn false;\n}\n\nbool MegaKernelRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn false;\n\t\t\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\t\n\tvoid* launch_args[] = { &render_data };\n\n\tm_kernels[MegaKernelRenderPass::MEGAKERNEL_KERNEL]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_render_resolution.x, m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\n\treturn true;\n}\n\nvoid MegaKernelRenderPass::reset(bool reset_by_camera_movement)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (!is_render_pass_used())\n\t\treturn;\n\n\tif (render_data.render_settings.accumulate)\n\t\tif (m_renderer->get_application_settings()->auto_sample_per_frame)\n\t\t\trender_data.render_settings.samples_per_frame = 1;\n\n\trender_data.render_settings.denoiser_AOV_accumulation_counter = 0;\n\n\trender_data.render_settings.sample_number = 0;\n}\n\nbool MegaKernelRenderPass::is_render_pass_used() const\n{\n\t// Only active if we're not using ReSTIR GI because if we are using ReSTIR, the path tracing is done in\n\t// the initial candidates kernel\n\treturn m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY) != PSS_RESTIR_GI;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/MegaKernelRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef MEGAKERNEL_RENDER_PASS_H\n#define MEGAKERNEL_RENDER_PASS_H\n\n#include \"Renderer/RenderPasses/RenderPass.h\"\n\nclass MegaKernelRenderPass : public RenderPass\n{\npublic:\n\tstatic const std::string MEGAKERNEL_RENDER_PASS_NAME;\n\tstatic const std::string MEGAKERNEL_KERNEL;\n\n\tMegaKernelRenderPass();\n\tMegaKernelRenderPass(GPURenderer* renderer);\n\tMegaKernelRenderPass(GPURenderer* renderer, const std::string& name);\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height);\n\t\n\tvirtual bool pre_render_update(float delta_time) override;\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override {};\n\n\tvirtual void update_render_data() {};\n\tvirtual void reset(bool reset_by_camera_movement);\n\n\tvirtual bool is_render_pass_used() const override;\n\nprivate:\n\tint2 m_render_resolution = make_int2(0, 0);\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/NEEPlusPlusHashGridStorage.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/NEEPlusPlusHashGridStorage.h\"\n#include \"Renderer/RenderPasses/NEEPlusPlusRenderPass.h\"\n\nvoid NEEPlusPlusHashGridStorage::set_nee_plus_plus_render_pass(NEEPlusPlusRenderPass* nee_plus_plus_render_pass)\n{\n\tm_nee_plus_plus_render_pass = nee_plus_plus_render_pass;\n}\n\nbool NEEPlusPlusHashGridStorage::pre_render_update(HIPRTRenderData& render_data, bool is_interacting_camera)\n{\n\tbool updated = false;\n\n\t// Allocating the buffers\n\tif (m_total_num_rays.size() == 0)\n\t{\n\t\tm_total_num_rays.resize(NEEPlusPlusHashGridStorage::DEFAULT_GRID_SIZE);\n\t\tm_total_unoccluded_rays.resize(NEEPlusPlusHashGridStorage::DEFAULT_GRID_SIZE);\n\t\tm_checksum_buffer.resize(NEEPlusPlusHashGridStorage::DEFAULT_GRID_SIZE);\n\n\t\tm_shadow_rays_actually_traced.resize(1);\n\t\tm_total_shadow_ray_queries.resize(1);\n\t\tm_total_cells_alive_count.resize(1);\n\t\tm_total_cells_alive_count_cpu_host_pinned_buffer.resize_host_pinned_mem(1);\n\n\t\tupdated = true;\n\t}\n\n\t// Clearing the visibility map if this has been asked by the user\n\tif (render_data.nee_plus_plus.m_reset_visibility_map && !is_interacting_camera)\n\t{\n\t\t// Clearing the visibility map by memseting everything to 0\n\t\t// Adn let's not reset everything while moving the camera because that's annoying for\n\t\t// the performance\n\n\t\tm_total_num_rays.memset_whole_buffer(0);\n\t\tm_total_unoccluded_rays.memset_whole_buffer(0);\n\t\tm_checksum_buffer.memset_whole_buffer(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\n\t\tm_total_shadow_ray_queries.memset_whole_buffer(0);\n\t\tm_shadow_rays_actually_traced.memset_whole_buffer(0);\n\t\tm_total_cells_alive_count.memset_whole_buffer(0);\n\t}\n\n\tif (render_data.render_settings.sample_number > render_data.nee_plus_plus.m_stop_update_samples)\n\t\t// 
Past a certain number of samples, there isn't really a point to keep updating, the visibility map\n\t\t// is probably converged enough that it doesn't make a difference anymore\n\t\trender_data.nee_plus_plus.m_update_visibility_map = false;\n\n\treturn updated;\n}\n\nvoid NEEPlusPlusHashGridStorage::update_render_data(HIPRTRenderData& render_data)\n{\n\tif (m_nee_plus_plus_render_pass->is_render_pass_used())\n\t{\n\t\trender_data.nee_plus_plus.m_entries_buffer.total_num_rays = m_total_num_rays.get_atomic_device_pointer();\n\t\trender_data.nee_plus_plus.m_entries_buffer.total_unoccluded_rays = m_total_unoccluded_rays.get_atomic_device_pointer();\n\t\trender_data.nee_plus_plus.m_entries_buffer.checksum_buffer = m_checksum_buffer.get_atomic_device_pointer();\n\t\trender_data.nee_plus_plus.m_total_number_of_cells = m_checksum_buffer.size();\n\n\t\trender_data.nee_plus_plus.m_shadow_rays_actually_traced = m_shadow_rays_actually_traced.get_atomic_device_pointer();\n\t\trender_data.nee_plus_plus.m_total_shadow_ray_queries = m_total_shadow_ray_queries.get_atomic_device_pointer();\n\t\trender_data.nee_plus_plus.m_total_cells_alive_count = m_total_cells_alive_count.get_atomic_device_pointer();\n\t}\n\telse\n\t{\n\t\trender_data.nee_plus_plus.m_entries_buffer.total_num_rays = nullptr;\n\t\trender_data.nee_plus_plus.m_entries_buffer.total_unoccluded_rays = nullptr;\n\t\trender_data.nee_plus_plus.m_entries_buffer.checksum_buffer = nullptr;\n\n\t\trender_data.nee_plus_plus.m_shadow_rays_actually_traced = nullptr;\n\t\trender_data.nee_plus_plus.m_total_shadow_ray_queries = nullptr;\n\t\trender_data.nee_plus_plus.m_total_cells_alive_count = nullptr;\n\t}\n}\n\nbool NEEPlusPlusHashGridStorage::free()\n{\n\tif (m_total_num_rays.size() != 
0)\n\t{\n\t\tm_total_num_rays.free();\n\t\tm_total_unoccluded_rays.free();\n\t\tm_checksum_buffer.free();\n\n\t\tm_total_shadow_ray_queries.free();\n\t\tm_shadow_rays_actually_traced.free();\n\n\t\tm_total_cells_alive_count.free();\n\t\tm_total_cells_alive_count_cpu_host_pinned_buffer.free();\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid NEEPlusPlusHashGridStorage::reset()\n{\n\tHIPRTRenderData& render_data = m_nee_plus_plus_render_pass->m_renderer->get_render_data();\n\n\trender_data.nee_plus_plus.m_reset_visibility_map = true;\n\trender_data.nee_plus_plus.m_update_visibility_map = true;\n\n\t// Resetting the counters\n\tif (m_total_shadow_ray_queries.is_allocated())\n\t{\n\t\tm_total_shadow_ray_queries.memset_whole_buffer(1);\n\t\tm_shadow_rays_actually_traced.memset_whole_buffer(1);\n\t\tm_total_cells_alive_count.memset_whole_buffer(0);\n\t}\n}\n\nbool NEEPlusPlusHashGridStorage::try_resize(HIPRTRenderData& render_data, float max_megabyte_size)\n{\n\tupdate_cell_alive_count();\n\n\tfloat load_factor = m_total_cells_alive_count_cpu / (float)m_checksum_buffer.size();\n\tbool load_factor_too_high = load_factor > 0.75f;\n\tbool maximum_size_not_reached = get_byte_size() / 1000000.0f <= max_megabyte_size * 0.95f;\n\tbool maximum_size_exceeded = get_byte_size() / 1000000.0f >= max_megabyte_size * 1.05f;\n\tif ((load_factor_too_high && maximum_size_not_reached) || maximum_size_exceeded)\n\t{\n\t\tunsigned int current_cell_count = m_checksum_buffer.size();\n\t\tunsigned int new_cell_count = current_cell_count * 1.5f;\n\n\t\tfloat cell_size_byte = get_byte_size() / current_cell_count;\n\t\tfloat estimated_new_size_bytes = cell_size_byte * new_cell_count;\n\t\tif (estimated_new_size_bytes > max_megabyte_size * 1000000.0f)\n\t\t\t// If the estimated new size is larger than the maximum size,\n\t\t\t// we need to compute the proper number of cells to fit the maximum size\n\t\t\tnew_cell_count = static_cast<unsigned int>(max_megabyte_size * 1000000.0f / 
cell_size_byte);\n\n\t\tm_total_unoccluded_rays.resize(new_cell_count);\n\t\tm_total_num_rays.resize(new_cell_count);\n\t\tm_checksum_buffer.resize(new_cell_count);\n\n\t\tm_total_unoccluded_rays.memset_whole_buffer(0);\n\t\tm_total_num_rays.memset_whole_buffer(0);\n\t\tm_checksum_buffer.memset_whole_buffer(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\t\tm_total_cells_alive_count.memset_whole_buffer(0);\n\n\t\tupdate_render_data(render_data);\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nunsigned int NEEPlusPlusHashGridStorage::update_cell_alive_count()\n{\n\tm_total_cells_alive_count.download_data_into(m_total_cells_alive_count_cpu_host_pinned_buffer.get_host_pinned_pointer());\n\tm_total_cells_alive_count_cpu = m_total_cells_alive_count_cpu_host_pinned_buffer.get_host_pinned_pointer()[0];\n\n\treturn get_cell_alive_count();\n}\n\nunsigned int NEEPlusPlusHashGridStorage::get_cell_alive_count() const\n{\n\treturn m_total_cells_alive_count_cpu;\n}\n\nstd::size_t NEEPlusPlusHashGridStorage::get_shadow_rays_actually_traced_from_GPU() const\n{\n\tauto data = m_shadow_rays_actually_traced.download_data();\n\tif (data.size() > 0)\n\t\treturn data[0];\n\telse\n\t\treturn 0;\n}\n\nstd::size_t NEEPlusPlusHashGridStorage::get_total_shadow_rays_queries_from_GPU() const\n{\n\tauto data = m_total_shadow_ray_queries.download_data();\n\tif (data.size() > 0)\n\t\treturn data[0];\n\telse\n\t\treturn 0;\n}\n\nstd::size_t NEEPlusPlusHashGridStorage::get_byte_size() const\n{\n\treturn m_total_unoccluded_rays.get_byte_size() +\n\t\tm_total_num_rays.get_byte_size() +\n\t\tm_checksum_buffer.get_byte_size() +\n\n\t\tm_total_shadow_ray_queries.get_byte_size() +\n\t\tm_shadow_rays_actually_traced.get_byte_size() +\n\t\tm_total_cells_alive_count.get_byte_size();\n}\n\nfloat NEEPlusPlusHashGridStorage::get_load_factor() const\n{\n\treturn get_cell_alive_count() / (float)m_total_num_rays.size();\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/NEEPlusPlusHashGridStorage.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_NEE_PLUS_PLUS_HASH_GRID_STORAGE_H\n#define RENDERER_NEE_PLUS_PLUS_HASH_GRID_STORAGE_H\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n\nclass NEEPlusPlusRenderPass;\n\nclass NEEPlusPlusHashGridStorage\n{\npublic:\n\tstatic constexpr unsigned int DEFAULT_GRID_SIZE = 1000000;\n\n\tvoid set_nee_plus_plus_render_pass(NEEPlusPlusRenderPass* nee_plus_plus_render_pass);\n\n\tbool pre_render_update(HIPRTRenderData& render_data, bool is_interacting_camera);\n\t\n\tvoid update_render_data(HIPRTRenderData& render_data);\n\tbool free();\n\tvoid reset();\n\n\tbool try_resize(HIPRTRenderData& render_data, float max_megabyte_size);\n\n\tunsigned int update_cell_alive_count();\n\tunsigned int get_cell_alive_count() const;\n\n\tstd::size_t get_shadow_rays_actually_traced_from_GPU() const;\n\tstd::size_t get_total_shadow_rays_queries_from_GPU() const;\n\tstd::size_t get_byte_size() const;\n\tfloat get_load_factor() const;\n\nprivate:\n\tNEEPlusPlusRenderPass* m_nee_plus_plus_render_pass;\n\n\tOrochiBuffer<unsigned int> m_total_unoccluded_rays;\n\tOrochiBuffer<unsigned int> m_total_num_rays;\n\n\tOrochiBuffer<unsigned int> m_checksum_buffer;\n\t\n\t// Counters on the GPU for tracking \n\tOrochiBuffer<unsigned long long int> m_total_shadow_ray_queries;\n\tOrochiBuffer<unsigned long long int> m_shadow_rays_actually_traced;\n\n\tOrochiBuffer<unsigned int> m_total_cells_alive_count;\n\tOrochiBuffer<unsigned int> m_total_cells_alive_count_cpu_host_pinned_buffer;\n\tunsigned int m_total_cells_alive_count_cpu = 0;\n};\n\n#endif"
  },
  {
    "path": "src/Renderer/RenderPasses/NEEPlusPlusRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/NEEPlusPlusRenderPass.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"UI/RenderWindow.h\"\n \nconst std::string NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE = \"NEE++ Pre-population\";\n\nconst std::string NEEPlusPlusRenderPass::NEE_PLUS_PLUS_RENDER_PASS_NAME = \"NEE++ Render Pass\";\n\nconst std::unordered_map<std::string, std::string> NEEPlusPlusRenderPass::KERNEL_FUNCTION_NAMES =\n{\n\t{ NEE_PLUS_PLUS_PRE_POPULATE, \"NEEPlusPlus_Grid_Prepopulate\" },\n};\n\nconst std::unordered_map<std::string, std::string> NEEPlusPlusRenderPass::KERNEL_FILES =\n{\n\t{ NEE_PLUS_PLUS_PRE_POPULATE, DEVICE_KERNELS_DIRECTORY \"/NEE++/GridPrepopulate.h\" },\n};\n\nNEEPlusPlusRenderPass::NEEPlusPlusRenderPass() : NEEPlusPlusRenderPass(nullptr) {}\nNEEPlusPlusRenderPass::NEEPlusPlusRenderPass(GPURenderer* renderer) : NEEPlusPlusRenderPass(renderer, NEEPlusPlusRenderPass::NEE_PLUS_PLUS_RENDER_PASS_NAME) {}\nNEEPlusPlusRenderPass::NEEPlusPlusRenderPass(GPURenderer* renderer, const std::string& name) : RenderPass(renderer, name) \n{\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_compiler_options = m_renderer->get_global_compiler_options();\n\t\n\tstd::unordered_set<std::string> options_not_synchronized = GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED;\n\toptions_not_synchronized.insert(GPUKernelCompilerOptions::BSDF_OVERRIDE);\n\t\n\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE] = 
std::make_shared<GPUKernel>();\n\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->set_kernel_file_path(NEEPlusPlusRenderPass::KERNEL_FILES.at(NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE));\n\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->set_kernel_function_name(NEEPlusPlusRenderPass::KERNEL_FUNCTION_NAMES.at(NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE));\n\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::BSDF_OVERRIDE, BSDF_LAMBERTIAN);\n\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->synchronize_options_with(global_compiler_options, options_not_synchronized);\n\n\tm_nee_plus_plus_storage.set_nee_plus_plus_render_pass(this);\n}\n\nbool NEEPlusPlusRenderPass::pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tif (!is_render_pass_used())\n\t\treturn false;\n\n\tbool nee_plus_plus__grid_populate_compiled = m_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->has_been_compiled();\n\tif (!nee_plus_plus__grid_populate_compiled)\n\t\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\treturn !nee_plus_plus__grid_populate_compiled;\n}\n \nbool NEEPlusPlusRenderPass::pre_render_update(float delta_time)\n{\n\tif (!is_render_pass_used())\n\t\treturn m_nee_plus_plus_storage.free();\n\n    HIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\treturn m_nee_plus_plus_storage.pre_render_update(render_data, m_render_window->is_interacting());\n}\n \nvoid NEEPlusPlusRenderPass::update_render_data()\n{\n\tm_nee_plus_plus_storage.update_render_data(m_renderer->get_render_data());\n}\n\nbool NEEPlusPlusRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) \n{\n\tif 
(!m_render_pass_used_this_frame)\n\t\treturn false;\n\n\tif (render_data.render_settings.sample_number == 0 && !m_render_window->is_interacting())\n\t{\n\t\tm_render_window->set_ImGui_status_text(\"NEE++ Prepopulation pass...\");\n\t\tlaunch_grid_pre_population(render_data);\n\t\tm_render_window->clear_ImGui_status_text();\n\t}\n\n\tif (m_nee_plus_plus_storage.try_resize(render_data, m_max_vram_usage_megabytes))\n\t\tupdate_render_data();\n\n\treturn true;\n}\n\nvoid NEEPlusPlusRenderPass::launch_grid_pre_population(HIPRTRenderData& render_data)\n{\n\tbool has_rehashed = false;\n\n\tdo\n\t{\n\t\tvoid* launch_args[] = { &render_data };\n\n\t\tm_kernels[NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE]->launch_asynchronous(\n\t\t\tKernelBlockWidthHeight, KernelBlockWidthHeight,\n\t\t\tm_renderer->m_render_resolution.x / NEEPlusPlus_GridPrepoluationResolutionDownscale, m_renderer->m_render_resolution.y / NEEPlusPlus_GridPrepoluationResolutionDownscale,\n\t\t\tlaunch_args, m_renderer->get_main_stream());\n\n\t\thas_rehashed = m_nee_plus_plus_storage.try_resize(render_data, m_max_vram_usage_megabytes);\n\t\tif (has_rehashed)\n\t\t\tupdate_render_data();\n\n\t} while (has_rehashed);\n}\n\nvoid NEEPlusPlusRenderPass::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) {}\n \nfloat NEEPlusPlusRenderPass::get_full_frame_time()\n{\n\tfloat sum = 0.0f;\n\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t{\n\t\tif (name_to_kernel.first == NEEPlusPlusRenderPass::NEE_PLUS_PLUS_PRE_POPULATE)\n\t\t\t// Not counting the pre population pass in the frame time since this is only\n\t\t\t// done on the very first frame of the render, not really representative of the\n\t\t\t// true frame time\n\t\t\tcontinue;\n\n\t\tsum += name_to_kernel.second->get_last_execution_time();\n\t}\n\n\treturn sum;\n}\n\nvoid NEEPlusPlusRenderPass::reset(bool reset_by_camera_movement)\n{\n     if (!is_render_pass_used())\n         
return;\n\n\tm_nee_plus_plus_storage.reset();\n}\n\n\nbool NEEPlusPlusRenderPass::is_render_pass_used() const\n{\n     // Only active if we're not using ReSTIR GI because if we are using ReSTIR, the path tracing is done in\n     // the initial candidates kernel\n     return m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS) == KERNEL_OPTION_TRUE;\n}\n \nNEEPlusPlusHashGridStorage& NEEPlusPlusRenderPass::get_nee_plus_plus_storage()\n{\n    return m_nee_plus_plus_storage;\n}\n\nfloat& NEEPlusPlusRenderPass::get_max_vram_usage()\n{\n\treturn m_max_vram_usage_megabytes;\n}\n\nstd::size_t NEEPlusPlusRenderPass::get_vram_usage_bytes() const\n{\n\treturn m_nee_plus_plus_storage.get_byte_size();\n}\n\nfloat NEEPlusPlusRenderPass::get_load_factor() const\n{\n\treturn m_nee_plus_plus_storage.get_load_factor();\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/NEEPlusPlusRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n #ifndef NEE_PLUS_PLUS_RENDER_PASS_H\n #define NEE_PLUS_PLUS_RENDER_PASS_H\n \n #include \"Renderer/RenderPasses/NEEPlusPlusHashGridStorage.h\"\n #include \"Renderer/RenderPasses/RenderPass.h\"\n \nclass NEEPlusPlusRenderPass : public RenderPass\n{\npublic:\n    static const std::string NEE_PLUS_PLUS_PRE_POPULATE;\n\n    static const std::string NEE_PLUS_PLUS_RENDER_PASS_NAME;\n \n    static const std::unordered_map<std::string, std::string> KERNEL_FUNCTION_NAMES;\n    static const std::unordered_map<std::string, std::string> KERNEL_FILES;\n\n    NEEPlusPlusRenderPass();\n    NEEPlusPlusRenderPass(GPURenderer* renderer);\n    NEEPlusPlusRenderPass(GPURenderer* renderer, const std::string& name);\n \n    virtual void resize(unsigned int new_width, unsigned int new_height) override {};\n     \n    virtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) override;\n    virtual bool pre_render_update(float delta_time) override;\n\n    virtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n    void launch_grid_pre_population(HIPRTRenderData& render_data);\n\n    virtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n    virtual float get_full_frame_time() override;\n\n    virtual void update_render_data() override;\n    virtual void reset(bool reset_by_camera_movement);\n \n    virtual bool is_render_pass_used() const override;\n\n    NEEPlusPlusHashGridStorage& get_nee_plus_plus_storage();\n\n    float& get_max_vram_usage();\n    std::size_t get_vram_usage_bytes() const;\n    float get_load_factor() const;\n \nprivate:\n\tfriend class NEEPlusPlusHashGridStorage;\n\n\n  
  // Maximum VRAM usage in megabytes\n\tfloat m_max_vram_usage_megabytes = 200.0f;\n\n    // Buffers and settings for NEE++\n    NEEPlusPlusHashGridStorage m_nee_plus_plus_storage;\n};\n \n#endif\n "
  },
  {
    "path": "src/Renderer/RenderPasses/ReGIRHashGridStorage.cpp",
    "content": "/**\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/ReGIRHashGridStorage.h\"\n#include \"Renderer/RenderPasses/ReGIRRenderPass.h\"\n\nvoid ReGIRHashGridStorage::set_regir_render_pass(ReGIRRenderPass* regir_render_pass)\n{\n\tm_regir_render_pass = regir_render_pass;\n}\n\nstd::size_t ReGIRHashGridStorage::get_byte_size() const\n{\n\treturn m_presampled_lights.get_byte_size() + \n\n\t\tm_initial_reservoirs_primary_hits_grid.get_byte_size() +\n\t\tm_initial_reservoirs_secondary_hits_grid.get_byte_size() +\n\n\t\tm_spatial_output_primary_hits_grid.get_byte_size() +\n\t\tm_spatial_output_secondary_hits_grid.get_byte_size() +\n\n\t\tm_async_compute_staging_buffer_primary_hits.get_byte_size() +\n\t\tm_async_compute_staging_buffer_secondary_hits.get_byte_size() +\n\n\t\tm_hash_cell_data_primary_hits.get_byte_size() +\n\t\tm_hash_cell_data_secondary_hits.get_byte_size() +\n\n\t\tm_correlation_reduction_grid_primary_hits.get_byte_size() +\n\n\t\tm_canonical_pre_integration_factors_primary_hits.get_byte_size() +\n\t\tm_canonical_pre_integration_factors_secondary_hits.get_byte_size();\n}\n\nbool ReGIRHashGridStorage::pre_render_update(HIPRTRenderData& render_data)\n{\n\tbool updated = false;\n\n\tupdated |= pre_render_update_internal(render_data, true);\n\tupdated |= pre_render_update_internal(render_data, false);\n\n\treturn updated;\n}\n\nbool ReGIRHashGridStorage::pre_render_update_internal(HIPRTRenderData& render_data, bool primary_hit)\n{\n\tReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\tif (render_data.render_settings.nb_bounces == 0 && !primary_hit)\n\t{\n\t\t// For the special case of 0 bounces in the scene, we can free the secondary hits cells because\n\t\t// they are never going to be used\n\t\treturn free_internal(false);\n\t}\n\n\tbool grid_not_allocated = 
get_total_number_of_cells(primary_hit) == 0;\n\tbool grid_res_changed = m_current_grid_min_cell_size != regir_settings.hash_grid.m_grid_cell_min_size || m_grid_cell_target_projected_size != regir_settings.hash_grid.m_grid_cell_target_projected_size;\n\tbool reservoirs_per_cell_changed = regir_settings.get_number_of_reservoirs_per_cell(primary_hit) != get_initial_grid_buffers(primary_hit).m_reservoirs_per_cell;\n\n\tbool needs_grid_resize = grid_not_allocated || grid_res_changed || reservoirs_per_cell_changed;\n\n\tbool updated = false;\n\tif (needs_grid_resize)\n\t{\n\t\tget_total_number_of_cells(primary_hit) = primary_hit ? ReGIRHashGridStorage::DEFAULT_GRID_CELL_COUNT_PRIMARY_HITS : ReGIRHashGridStorage::DEFAULT_GRID_CELL_COUNT_SECONDARY_HITS; // Default grid size\n\n\t\tm_current_grid_min_cell_size = regir_settings.hash_grid.m_grid_cell_min_size;\n\t\tm_grid_cell_target_projected_size = regir_settings.hash_grid.m_grid_cell_target_projected_size;\n\n\t\t// We need to make sure that the async grid fill is finished before resizing the buffers because\n\t\t// we don't want to resize the buffers while the async grid fill is running\n\t\tm_regir_render_pass->synchronize_async_compute();\n\n\t\tget_initial_grid_buffers(primary_hit).resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\n\t\tget_hash_cell_data_soa(primary_hit).resize(get_total_number_of_cells(primary_hit));\n\n\t\tget_non_canonical_factors(primary_hit).resize(get_total_number_of_cells(primary_hit));\n\t\tget_canonical_factors(primary_hit).resize(get_total_number_of_cells(primary_hit));\n\n\t\tupdated = true;\n\t}\n\n\tif (regir_settings.spatial_reuse.do_spatial_reuse)\n\t{\n\t\tbool spatial_grid_not_allocated = get_spatial_grid_buffers(primary_hit).m_total_number_of_cells == 0;\n\n\t\tbool needs_spatial_grid_resize = spatial_grid_not_allocated || grid_res_changed || reservoirs_per_cell_changed;\n\n\t\tif 
(needs_spatial_grid_resize)\n\t\t{\n\t\t\t// Resizing the spatial buffer\n\t\t\tget_spatial_grid_buffers(primary_hit).resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\n\t\t\tupdated = true;\n\t\t}\n\t}\n\telse\n\t{\n\t\tif (get_spatial_grid_buffers(primary_hit).m_total_number_of_cells > 0)\n\t\t\tget_spatial_grid_buffers(primary_hit).free();\n\t}\n\n\tif (regir_settings.do_asynchronous_compute)\n\t{\n\t\tbool async_grid_not_allocated = get_async_compute_staging_buffer(primary_hit).m_total_number_of_cells == 0;\n\t\tbool needs_async_grid_resize = async_grid_not_allocated || grid_res_changed || reservoirs_per_cell_changed;\n\n\t\tif (needs_async_grid_resize)\n\t\t\tget_async_compute_staging_buffer(primary_hit).resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\t}\n\telse\n\t{\n\t\tif (get_async_compute_staging_buffer(primary_hit).m_total_number_of_cells > 0)\n\t\t\tget_async_compute_staging_buffer(primary_hit).free();\n\t}\n\n\tif (primary_hit)\n\t{\n\t\tif (regir_settings.supersampling.do_correlation_reduction)\n\t\t{\n\t\t\tbool correlation_reduction_grid_not_allocated = m_correlation_reduction_grid_primary_hits.m_total_number_of_cells == 0;\n\t\t\tbool correlation_reduction_reservoirs_count_changed = regir_settings.get_number_of_reservoirs_per_cell(primary_hit) != m_correlation_reduction_grid_primary_hits.m_reservoirs_per_cell / regir_settings.supersampling.correlation_reduction_factor;\n\t\t\tbool needs_supersample_grid_resize = correlation_reduction_grid_not_allocated || grid_res_changed || correlation_reduction_reservoirs_count_changed;\n\n\t\t\tif (needs_supersample_grid_resize)\n\t\t\t{\n\t\t\t\tm_correlation_reduction_grid_primary_hits.resize(get_total_number_of_cells(true), regir_settings.get_number_of_reservoirs_per_cell(true) * 
regir_settings.supersampling.correlation_reduction_factor);\n\n\t\t\t\tm_correlation_reduction_current_grid_offset = 0;\n\t\t\t\tm_correlation_reduction_frames_available = 0;\n\n\t\t\t\tupdated = true;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (m_correlation_reduction_grid_primary_hits.m_total_number_of_cells > 0)\n\t\t\t\tm_correlation_reduction_grid_primary_hits.free();\n\t\t}\n\n\t\tif (render_data.render_settings.regir_settings.do_light_presampling)\n\t\t{\n\t\t\tunsigned int presampled_lights_count_needed = render_data.render_settings.regir_settings.presampled_lights.get_presampled_light_count();\n\t\t\tif (m_presampled_lights.size() != presampled_lights_count_needed)\n\t\t\t{\n\t\t\t\t// The async grid fill is using the presampled lights so we need\n\t\t\t\t// to make sure that it's finished before resizing\n\t\t\t\tm_regir_render_pass->synchronize_async_compute();\n\n\t\t\t\t// If the current presampled light buffer isn't the right size, resizing\n\t\t\t\tm_presampled_lights.resize(presampled_lights_count_needed);\n\n\t\t\t\tupdated = true;\n\t\t\t}\n\t\t}\n\t}\n\n\treturn updated;\n}\n\nvoid ReGIRHashGridStorage::post_sample_update_async(HIPRTRenderData& render_data)\n{\n\tincrement_supersampling_counters(render_data);\n}\n\nvoid ReGIRHashGridStorage::increment_supersampling_counters(HIPRTRenderData& render_data)\n{\n\tm_correlation_reduction_current_grid_offset++;\n\tm_correlation_reduction_current_grid_offset %= render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor;\n\n\tm_correlation_reduction_frames_available++;\n\tm_correlation_reduction_frames_available = hippt::min(m_correlation_reduction_frames_available, render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor);\n}\n\nbool ReGIRHashGridStorage::try_rehash(HIPRTRenderData& render_data)\n{\n\tbool rehashed = false;\n\n\trehashed |= try_rehash_internal(render_data, true);\n\trehashed |= try_rehash_internal(render_data, false);\n\n\treturn 
rehashed;\n}\n\nbool ReGIRHashGridStorage::try_rehash_internal(HIPRTRenderData& render_data, bool primary_hit)\n{\n\tReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n\t// We don't need a full reset, instead checking if we need to dynamically grow the size of the hash\n\t// table to keep the load factor in check\n\tfloat cell_alive_ratio = m_regir_render_pass->get_alive_cells_ratio(primary_hit);\t\n\tif (cell_alive_ratio > 0.60f)\n\t{\n\t\tm_regir_render_pass->update_all_cell_alive_count(render_data);\n\n\t\tunsigned int m_grid_cells_alive = m_regir_render_pass->get_number_of_cells_alive(primary_hit);\n\t\tif (m_grid_cells_alive > 0)\n\t\t{\n\t\t\t// Increasing the number of cells\n\t\t\tget_total_number_of_cells(primary_hit) *= 1.5;\n\n\t\t\t// Allocating a larger hash table\n\t\t\tReGIRHashGridSoAHost<OrochiBuffer> new_hash_grid_soa;\n\t\t\tnew_hash_grid_soa.resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\n\t\t\tReGIRHashCellDataSoAHost<OrochiBuffer> new_hash_cell_data;\n\t\t\tnew_hash_cell_data.resize(get_total_number_of_cells(primary_hit));\n\n\t\t\tReGIRHashGridSoADevice new_hash_grid_device;\n\t\t\tnew_hash_grid_soa.to_device(new_hash_grid_device);\n\n\t\t\tReGIRHashCellDataSoADevice new_hash_cell_data_device = new_hash_cell_data.to_device();\n\n\t\t\t// For each cell alive, we're going to insert it in the new, larger, hash table, with a GPU kernel to do that\n\t\t\tm_regir_render_pass->launch_rehashing_kernel(render_data, primary_hit, new_hash_grid_device, new_hash_cell_data_device);\n\n\t\t\tget_initial_grid_buffers(primary_hit) = std::move(new_hash_grid_soa);\n\t\t\tif (regir_settings.spatial_reuse.do_spatial_reuse)\n\t\t\t\tget_spatial_grid_buffers(primary_hit).resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\t\t\tif (regir_settings.supersampling.do_correlation_reduction && 
primary_hit)\n\t\t\t{\n\t\t\t\tm_correlation_reduction_grid_primary_hits.resize(get_total_number_of_cells(true), regir_settings.get_number_of_reservoirs_per_cell(true) * regir_settings.supersampling.correlation_reduction_factor);\n\n\t\t\t\tm_correlation_reduction_current_grid_offset = 0;\n\t\t\t\tm_correlation_reduction_frames_available = 0;\n\t\t\t}\n\n\t\t\tif (regir_settings.do_asynchronous_compute)\n\t\t\t\tget_async_compute_staging_buffer(primary_hit).resize(get_total_number_of_cells(primary_hit), regir_settings.get_number_of_reservoirs_per_cell(primary_hit));\n\n\t\t\tget_hash_cell_data_soa(primary_hit) = std::move(new_hash_cell_data);\n\n\t\t\tget_non_canonical_factors(primary_hit).resize(get_total_number_of_cells(primary_hit));\n\t\t\tget_canonical_factors(primary_hit).resize(get_total_number_of_cells(primary_hit));\n\n\t\t\t// We need to update the cell alive count because there may have possibly been collisions that couldn't be resolved during the rehashing\n\t\t\t// and maybe some cells could not be reinserted in the new hash table --> the cell alive count is different (lower) --> need to update\n\t\t\tm_regir_render_pass->update_all_cell_alive_count(render_data);\n\n\t\t\treturn true;\n\t\t}\n\t}\n\n\treturn false;\n}\n\nvoid ReGIRHashGridStorage::reset()\n{\n\treset_internal(true);\n\n\tif (m_regir_render_pass->get_renderer()->get_render_data().render_settings.nb_bounces > 0 && get_initial_grid_buffers(false).get_byte_size() > 0)\n\t\t// If the renderer has more than 0 bounce, then we actually have secondary grid cells to reset\n\t\treset_internal(false);\n}\n\nvoid ReGIRHashGridStorage::reset_internal(bool primary_hit)\n{\n\t// Resetting the 'cell alive' buffers\n\tget_hash_cell_data_soa(primary_hit).m_hash_cell_data.template get_buffer<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE>().memset_whole_buffer(0);\n\n\t// Resetting the pre-integration factors 
buffer\n\tget_non_canonical_factors(primary_hit).memset_whole_buffer(0.0f);\n\tget_canonical_factors(primary_hit).memset_whole_buffer(0.0f);\n\n\t// Resetting the count buffers\n\tget_hash_cell_data_soa(primary_hit).m_grid_cells_alive_count.memset_whole_buffer(0);\n\n\tget_hash_cell_data_soa(primary_hit).m_hash_cell_data.template get_buffer<REGIR_HASH_CELL_PRIM_INDEX>().memset_whole_buffer(ReGIRHashCellDataSoADevice::UNDEFINED_PRIMITIVE);\n\tget_hash_cell_data_soa(primary_hit).m_hash_cell_data.template get_buffer<REGIR_HASH_CELL_CHECKSUMS>().memset_whole_buffer(HashGrid::UNDEFINED_CHECKSUM_OR_GRID_INDEX);\n\n\t// Resetting the reservoirs\n\tget_initial_grid_buffers(primary_hit).reservoirs.get_buffer<ReGIRReservoirSoAHostBuffers::REGIR_RESERVOIR_UCW>().memset_whole_buffer(ReGIRReservoir::UNDEFINED_UCW);\n\tif (m_regir_render_pass->get_renderer()->get_render_data().render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t{\n\t\tif (get_spatial_grid_buffers(primary_hit).reservoirs.get_buffer<ReGIRReservoirSoAHostBuffers::REGIR_RESERVOIR_UCW>().size() > 0)\n\t\t\t// We need to check the size before the reset because the reset method is called before the pre_render_update method\n\t\t\t// (where the buffer is allocated) so this reset call may try to reset a buffer that wasn't allocated\n\t\t\tget_spatial_grid_buffers(primary_hit).reservoirs.get_buffer<ReGIRReservoirSoAHostBuffers::REGIR_RESERVOIR_UCW>().memset_whole_buffer(ReGIRReservoir::UNDEFINED_UCW);\n\t}\n}\n\nbool ReGIRHashGridStorage::free()\n{\n\tbool updated = false;\n\n\tupdated |= free_internal(true);\n\tupdated |= free_internal(false);\n\n\treturn updated;\n}\n\nbool ReGIRHashGridStorage::free_internal(bool primary_hit)\n{\n\tbool updated = false;\n\n\tif (get_initial_grid_buffers(primary_hit).get_byte_size() > 0)\n\t{\n\t\tget_initial_grid_buffers(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (get_spatial_grid_buffers(primary_hit).get_byte_size() > 
0)\n\t{\n\t\tget_spatial_grid_buffers(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (get_async_compute_staging_buffer(primary_hit).get_byte_size() > 0)\n\t{\n\t\tget_async_compute_staging_buffer(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (m_correlation_reduction_grid_primary_hits.get_byte_size() > 0 && primary_hit)\n\t{\n\t\tm_correlation_reduction_grid_primary_hits.free();\n\n\t\tupdated = true;\n\t}\n\n\tif (get_hash_cell_data_soa(primary_hit).get_byte_size() > 0)\n\t{\n\t\tget_hash_cell_data_soa(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (get_non_canonical_factors(primary_hit).get_byte_size() > 0)\n\t{\n\t\tget_non_canonical_factors(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (get_canonical_factors(primary_hit).get_byte_size() > 0)\n\t{\n\t\tget_canonical_factors(primary_hit).free();\n\n\t\tupdated = true;\n\t}\n\n\tif (primary_hit && m_presampled_lights.get_byte_size() > 0)\n\t\t// Only freeing the presampled lights on the first hit by convention\n\t\tm_presampled_lights.free();\n\n\tif (primary_hit)\n\t\tm_total_number_of_cells_primary_hits = 0;\n\telse\n\t\tm_total_number_of_cells_secondary_hits = 0;\n\n\treturn updated;\n}\n\nvoid ReGIRHashGridStorage::clear_pre_integrated_RIS_integral_factors(bool primary_hit)\n{\n\tget_non_canonical_factors(primary_hit).memset_whole_buffer(0.0f);\n\tget_canonical_factors(primary_hit).memset_whole_buffer(0.0f);\n}\n\nvoid ReGIRHashGridStorage::to_device(HIPRTRenderData& render_data)\n{\n\tif (render_data.render_settings.regir_settings.do_light_presampling)\n\t\tm_presampled_lights.to_device(render_data.render_settings.regir_settings.presampled_lights.presampled_lights_soa);\n\n\t// Primary hits grid cells\n\tm_initial_reservoirs_primary_hits_grid.to_device(render_data.render_settings.regir_settings.initial_reservoirs_primary_hits_grid);\n\n\tif 
(render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t\tm_spatial_output_primary_hits_grid.to_device(render_data.render_settings.regir_settings.spatial_output_primary_hits_grid);\n\n\tif (render_data.render_settings.regir_settings.supersampling.do_correlation_reduction)\n\t\tm_correlation_reduction_grid_primary_hits.to_device(render_data.render_settings.regir_settings.supersampling.correlation_reduction_grid);\n\n\trender_data.render_settings.regir_settings.hash_cell_data_primary_hits = m_hash_cell_data_primary_hits.to_device();\n\n\trender_data.render_settings.regir_settings.non_canonical_pre_integration_factors_primary_hits = get_non_canonical_factors(true).get_atomic_device_pointer();\n\trender_data.render_settings.regir_settings.canonical_pre_integration_factors_primary_hits = get_canonical_factors(true).get_atomic_device_pointer();\n\n\t// Secondary hits grid cells\n\tif (render_data.render_settings.nb_bounces > 0)\n\t{\n\t\tm_initial_reservoirs_secondary_hits_grid.to_device(render_data.render_settings.regir_settings.initial_reservoirs_secondary_hits_grid);\n\n\t\tif (render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t\t\tm_spatial_output_secondary_hits_grid.to_device(render_data.render_settings.regir_settings.spatial_output_secondary_hits_grid);\n\n\t\trender_data.render_settings.regir_settings.hash_cell_data_secondary_hits = m_hash_cell_data_secondary_hits.to_device();\n\n\t\trender_data.render_settings.regir_settings.non_canonical_pre_integration_factors_secondary_hits = get_non_canonical_factors(false).get_atomic_device_pointer();\n\t\trender_data.render_settings.regir_settings.canonical_pre_integration_factors_secondary_hits = get_canonical_factors(false).get_atomic_device_pointer();\n\t}\n}\n\nReGIRHashGridSoAHost<OrochiBuffer>& ReGIRHashGridStorage::get_initial_grid_buffers(bool primary_hit)\n{\n\treturn primary_hit ? 
m_initial_reservoirs_primary_hits_grid : m_initial_reservoirs_secondary_hits_grid;\n}\n\nReGIRHashGridSoAHost<OrochiBuffer>& ReGIRHashGridStorage::get_spatial_grid_buffers(bool primary_hit)\n{\n\treturn primary_hit ? m_spatial_output_primary_hits_grid : m_spatial_output_secondary_hits_grid;\n}\n\nReGIRHashGridSoAHost<OrochiBuffer>& ReGIRHashGridStorage::get_async_compute_staging_buffer(bool primary_hit)\n{\n\treturn primary_hit ? m_async_compute_staging_buffer_primary_hits : m_async_compute_staging_buffer_secondary_hits;\n}\n\nReGIRHashGridSoADevice ReGIRHashGridStorage::get_async_compute_staging_buffer_device(bool primary_hit)\n{\n\tReGIRHashGridSoADevice output_soa_device;\n\n\tif (primary_hit)\n\t\tm_async_compute_staging_buffer_primary_hits.to_device(output_soa_device);\n\telse\n\t\tm_async_compute_staging_buffer_secondary_hits.to_device(output_soa_device);\n\n\treturn output_soa_device;\n}\n\nReGIRHashCellDataSoAHost<OrochiBuffer>& ReGIRHashGridStorage::get_hash_cell_data_soa(bool primary_hit)\n{\n\treturn primary_hit ? m_hash_cell_data_primary_hits : m_hash_cell_data_secondary_hits;\n}\n\nReGIRHashCellDataSoADevice& ReGIRHashGridStorage::get_hash_cell_data_device_soa(ReGIRSettings& regir_settings, bool primary_hit)\n{\n\treturn regir_settings.get_hash_cell_data_soa(primary_hit);\n}\n\nOrochiBuffer<float>& ReGIRHashGridStorage::get_non_canonical_factors(bool primary_hit)\n{\n\treturn primary_hit ? m_non_canonical_pre_integration_factors_primary_hits : m_non_canonical_pre_integration_factors_secondary_hits;\n}\n\nOrochiBuffer<float>& ReGIRHashGridStorage::get_canonical_factors(bool primary_hit)\n{\n\treturn primary_hit ? m_canonical_pre_integration_factors_primary_hits : m_canonical_pre_integration_factors_secondary_hits;\n}\n\nunsigned int& ReGIRHashGridStorage::get_total_number_of_cells(bool primary_hit)\n{\n\treturn primary_hit ? 
m_total_number_of_cells_primary_hits : m_total_number_of_cells_secondary_hits;\n}\n\nunsigned int ReGIRHashGridStorage::get_total_number_of_cells(bool primary_hit) const\n{\n\treturn primary_hit ? m_total_number_of_cells_primary_hits : m_total_number_of_cells_secondary_hits;\n}\n\nunsigned int ReGIRHashGridStorage::get_supersampling_current_frame() const\n{\n\treturn m_correlation_reduction_current_grid_offset;\n}\n\nunsigned int ReGIRHashGridStorage::get_supersampling_frames_available() const\n{\n\treturn m_correlation_reduction_frames_available;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReGIRHashGridStorage.h",
    "content": "/**\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef REGIR_HASH_GRID_STORAGE_H\n#define REGIR_HASH_GRID_STORAGE_H\n\n#include \"HostDeviceCommon/RenderData.h\"\n\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRHashGridSoAHost.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRHashCellDataSoAHost.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRPresampledLightsSoAHost.h\"\n\nclass ReGIRRenderPass;\n\nclass ReGIRHashGridStorage\n{\npublic:\n\tstatic constexpr unsigned int DEFAULT_GRID_CELL_COUNT_PRIMARY_HITS = 5000;\n\tstatic constexpr unsigned int DEFAULT_GRID_CELL_COUNT_SECONDARY_HITS = 10000;\n\n\tvoid set_regir_render_pass(ReGIRRenderPass* regir_render_pass);\n\n\tstd::size_t get_byte_size() const;\n\n\tbool pre_render_update(HIPRTRenderData& render_data);\n\tvoid post_sample_update_async(HIPRTRenderData& render_data);\n\tvoid increment_supersampling_counters(HIPRTRenderData& render_data);\n\tbool try_rehash(HIPRTRenderData& render_data);\n\tvoid reset();\n\tbool free();\n\n\tvoid clear_pre_integrated_RIS_integral_factors(bool primary_hit);\n\n\tvoid to_device(HIPRTRenderData& render_data);\n\n\tReGIRHashGridSoAHost<OrochiBuffer>& get_initial_grid_buffers(bool primary_hit);\n\tReGIRHashGridSoAHost<OrochiBuffer>& get_spatial_grid_buffers(bool primary_hit);\n\tReGIRHashGridSoAHost<OrochiBuffer>& get_async_compute_staging_buffer(bool primary_hit);\n\tReGIRHashGridSoADevice get_async_compute_staging_buffer_device(bool primary_hit);\n\tReGIRHashCellDataSoAHost<OrochiBuffer>& get_hash_cell_data_soa(bool primary_hit);\n\tReGIRHashCellDataSoADevice& get_hash_cell_data_device_soa(ReGIRSettings& regir_settings, bool primary_hit);\n\tOrochiBuffer<float>& get_non_canonical_factors(bool primary_hit);\n\tOrochiBuffer<float>& get_canonical_factors(bool primary_hit);\n\tunsigned int& get_total_number_of_cells(bool 
primary_hit);\n\tunsigned int get_total_number_of_cells(bool primary_hit) const;\n\n\tunsigned int get_supersampling_current_frame() const;\n\tunsigned int get_supersampling_frames_available() const;\n\npublic:\n\tvoid reset_internal(bool primary_hit);\n\tbool pre_render_update_internal(HIPRTRenderData& render_data, bool primary_hit);\n\tbool try_rehash_internal(HIPRTRenderData& render_data, bool primary_hit);\n\tbool free_internal(bool primary_hit);\n\n\tReGIRRenderPass* m_regir_render_pass = nullptr;\n\n\tReGIRPresampledLightsSoAHost<OrochiBuffer> m_presampled_lights;\n\n\t// Buffer that contains the ReGIR grid. If temporal reuse is enabled,\n\t// this buffer will contain more than one grid worth of space to\n\t// accommodate the grids of past frames for temporal reuse\n\tReGIRHashGridSoAHost<OrochiBuffer> m_initial_reservoirs_primary_hits_grid;\n\tReGIRHashGridSoAHost<OrochiBuffer> m_initial_reservoirs_secondary_hits_grid;\n\tReGIRHashGridSoAHost<OrochiBuffer> m_spatial_output_primary_hits_grid;\n\tReGIRHashGridSoAHost<OrochiBuffer> m_spatial_output_secondary_hits_grid;\n\n\t// For filling the grid asynchronously, we sometimes (depending on the spatial reuse settings etc...)\n\t// need another buffer to store the results of the async compute without overriding the buffers\n\t// that the path tracing kernels are currently using to shade\n\tReGIRHashGridSoAHost<OrochiBuffer> m_async_compute_staging_buffer_primary_hits;\n\tReGIRHashGridSoAHost<OrochiBuffer> m_async_compute_staging_buffer_secondary_hits;\n\n\tint m_correlation_reduction_current_grid_offset = 0;\n\tint m_correlation_reduction_frames_available = 0;\n\tReGIRHashGridSoAHost<OrochiBuffer> m_correlation_reduction_grid_primary_hits;\n\n\t// Stores the pre-integrated RIS integral for each cell in the grid\n\tOrochiBuffer<float> m_non_canonical_pre_integration_factors_primary_hits;\n\tOrochiBuffer<float> m_non_canonical_pre_integration_factors_secondary_hits;\n\tOrochiBuffer<float> 
m_canonical_pre_integration_factors_primary_hits;\n\tOrochiBuffer<float> m_canonical_pre_integration_factors_secondary_hits;\n\n\tReGIRHashCellDataSoAHost<OrochiBuffer> m_hash_cell_data_primary_hits;\n\tReGIRHashCellDataSoAHost<OrochiBuffer> m_hash_cell_data_secondary_hits;\n\n\tfloat m_current_grid_min_cell_size = 0.0f;\n\tfloat m_grid_cell_target_projected_size = 0.0f;\n\n\tunsigned int m_total_number_of_cells_primary_hits = 0;\n\tunsigned int m_total_number_of_cells_secondary_hits = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReGIRRenderPass.cpp",
    "content": "/**\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/ReGIRRenderPass.h\"\n\n#include \"UI/RenderWindow.h\"\n\nconst std::string ReGIRRenderPass::REGIR_GRID_PRE_POPULATE = \"ReGIR Pre-population\";\nconst std::string ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING = \"ReGIR Light presampling\";\nconst std::string ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID = \"ReGIR Grid fill 1st hits\";\nconst std::string ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID = \"ReGIR Grid fill 2nd hits\";\nconst std::string ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID = \"ReGIR Spatial reuse 1st hits\";\nconst std::string ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID = \"ReGIR Spatial reuse 2nd hits\";\nconst std::string ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID = \"ReGIR Pre-integration\";\nconst std::string ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID = \"ReGIR Pre-integration grid fill\";\nconst std::string ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID = \"ReGIR Pre-integration spatial reuse\";\nconst std::string ReGIRRenderPass::REGIR_REHASH_KERNEL_ID = \"ReGIR Rehash kernel\";\nconst std::string ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID = \"ReGIR Supersampling copy\";\n\nconst std::string ReGIRRenderPass::REGIR_RENDER_PASS_NAME = \"ReGIR Render Pass\";\n\nconst std::unordered_map<std::string, std::string> ReGIRRenderPass::KERNEL_FUNCTION_NAMES =\n{\n\t{ REGIR_GRID_PRE_POPULATE, \"ReGIR_Grid_Prepopulate\" },\n\t{ REGIR_GRID_FILL_LIGHT_PRESAMPLING, \"ReGIR_Light_Presampling\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID, \"ReGIR_Grid_Fill_Temporal_Reuse\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID, \"ReGIR_Grid_Fill_Temporal_Reuse\" 
},\n\t{ REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID, \"ReGIR_Spatial_Reuse\" },\n\t{ REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID, \"ReGIR_Spatial_Reuse\" },\n\t{ REGIR_PRE_INTEGRATION_KERNEL_ID , \"ReGIR_Pre_integration\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID, \"ReGIR_Grid_Fill_Temporal_Reuse\"},\n\t{ REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID, \"ReGIR_Spatial_Reuse\"},\n\t{ REGIR_REHASH_KERNEL_ID, \"ReGIR_Rehash\" },\n\t{ REGIR_SUPERSAMPLING_COPY_KERNEL_ID, \"ReGIR_Supersampling_Copy\" },\n};\n\nconst std::unordered_map<std::string, std::string> ReGIRRenderPass::KERNEL_FILES =\n{\n\t{ REGIR_GRID_PRE_POPULATE, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/GridPrepopulate.h\" },\n\t{ REGIR_GRID_FILL_LIGHT_PRESAMPLING, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/LightPresampling.h\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/GridFillTemporalReuse.h\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/GridFillTemporalReuse.h\" },\n\t{ REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/SpatialReuse.h\" },\n\t{ REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/SpatialReuse.h\" },\n\t{ REGIR_PRE_INTEGRATION_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/PreIntegration.h\" },\n\t{ REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/GridFillTemporalReuse.h\"},\n\t{ REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/SpatialReuse.h\"},\n\t{ REGIR_REHASH_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/Rehash.h\" },\n\t{ REGIR_SUPERSAMPLING_COPY_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/ReGIR/SupersamplingCopy.h\" },\n};\n\nReGIRRenderPass::ReGIRRenderPass(GPURenderer* renderer) : RenderPass(renderer, 
ReGIRRenderPass::REGIR_RENDER_PASS_NAME)\n{\n\tm_hash_grid_storage.set_regir_render_pass(this);\n\tOROCHI_CHECK_ERROR(oroStreamCreate(&m_pre_integration_async_stream));\n\tOROCHI_CHECK_ERROR(oroStreamCreate(&m_grid_fill_async_stream_primary_hits));\n\tOROCHI_CHECK_ERROR(oroStreamCreate(&m_grid_fill_async_stream_secondary_hits));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_oro_event));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_event_pre_integration_duration_start));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_event_pre_integration_duration_stop));\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_compiler_options = m_renderer->get_global_compiler_options();\n\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_GRID_PRE_POPULATE));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_GRID_PRE_POPULATE));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\n\n\n\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->synchronize_options_with(global_compiler_options, 
GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\tstd::unordered_set<std::string> options_not_synchronized = GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED;\n\toptions_not_synchronized.insert(GPUKernelCompilerOptions::BSDF_OVERRIDE);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->synchronize_options_with(global_compiler_options, options_not_synchronized);\n\t// Always using a Lambertian BRDF for filling the secondary hits of the grid fill pass because 
we don't\n\t// want to use the BSDF of the surface for that since we don't have the proper view direction\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::BSDF_OVERRIDE, BSDF_LAMBERTIAN);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->synchronize_options_with(global_compiler_options, 
options_not_synchronized);\n\t// Always using a Lambertian BRDF for filling the secondary hits of the grid fill pass because we don't\n\t// want to use the BSDF of the surface for that since we don't have the proper view direction\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::BSDF_OVERRIDE, BSDF_LAMBERTIAN);\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\n\n\n\t\t\n\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\n\toptions_not_synchronized = GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED;\n\toptions_not_synchronized.insert(GPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID] = 
std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->synchronize_options_with(global_compiler_options, options_not_synchronized);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION, KERNEL_OPTION_TRUE);\n\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->synchronize_options_with(global_compiler_options, options_not_synchronized);\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, 
KERNEL_OPTION_TRUE);\n\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_SPATIAL_REUSE_ACCUMULATE_PRE_INTEGRATION, KERNEL_OPTION_TRUE);\n\n\n\n\n\n\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_REHASH_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_REHASH_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\n\tm_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID]->set_kernel_file_path(ReGIRRenderPass::KERNEL_FILES.at(ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID));\n\tm_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID]->set_kernel_function_name(ReGIRRenderPass::KERNEL_FUNCTION_NAMES.at(ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID));\n}\n\nbool ReGIRRenderPass::pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tif (!is_render_pass_used())\n\t\treturn false;\n\n\tbool updated = false;\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\n\n\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->has_been_compiled())\n\t{\n\t\tupdated = 
true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\n\n\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = 
true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\n\n\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\tif (!m_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID]->has_been_compiled())\n\t{\n\t\tupdated = true;\n\t\tm_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\t}\n\n\treturn updated;\n}\n\nbool ReGIRRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\tReGIRSettings& regir_settings = render_data.render_settings.regir_settings;\n\n\tbool updated = false;\n\n\t// We wouldn't want to resize/whatever pre_render_update does to the buffers\n\t// while async compute is filling them so synchronization here\n\tsynchronize_async_compute();\n\n\tif (is_render_pass_used())\n\t{\n\t\tbool storage_updated = m_hash_grid_storage.pre_render_update(render_data);\n\t\tif (storage_updated)\n\t\t\tm_grid_cells_alive_count_staging_host_pinned_buffer.resize_host_pinned_mem(1);\n\n\t\tupdated |= storage_updated;\n\t}\n\telse\n\t{\n\t\tif (m_hash_grid_storage.free())\n\t\t\tupdated = true;\n\t}\n\n\treturn updated;\n}\n\nvoid callback_reset_imgui_status_text(void* payload)\n{\n\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(payload);\n\n\trender_window->clear_ImGui_status_text();\n}\n\n/**\n * Returns whichever of the two candidates isn't 'buffer'\n */\nReGIRHashGridSoADevice get_non_equal_buffer(ReGIRHashGridSoADevice candidate_A, ReGIRHashGridSoADevice candidate_B, ReGIRHashGridSoADevice buffer)\n{\n\treturn buffer.reservoirs.UCW == candidate_A.reservoirs.UCW ? 
candidate_B : candidate_A;\n}\n\n/**\n * Returns whichever of the three candidates isn't 'buffer1' and also isn't 'buffer2'\n */\nReGIRHashGridSoADevice get_non_equal_buffer(ReGIRHashGridSoADevice candidate_A, ReGIRHashGridSoADevice candidate_B, ReGIRHashGridSoADevice candidate_C, ReGIRHashGridSoADevice buffer1, ReGIRHashGridSoADevice buffer2)\n{\n\tif (candidate_A.reservoirs.UCW != buffer1.reservoirs.UCW && candidate_A.reservoirs.UCW != buffer2.reservoirs.UCW)\n\t\treturn candidate_A;\n\n\tif (candidate_B.reservoirs.UCW != buffer1.reservoirs.UCW && candidate_B.reservoirs.UCW != buffer2.reservoirs.UCW)\n\t\treturn candidate_B;\n\n\tif (candidate_C.reservoirs.UCW != buffer1.reservoirs.UCW && candidate_C.reservoirs.UCW != buffer2.reservoirs.UCW)\n\t\treturn candidate_C;\n\n\treturn ReGIRHashGridSoADevice();\n}\n\nbool ReGIRRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn false;\n\n\tsynchronize_async_compute();\n\n\tif (render_data.render_settings.sample_number == 0 && !m_render_window->is_interacting())\n\t{\n\t\tm_render_window->set_ImGui_status_text(\"ReGIR Prepopulation pass...\");\n\t\tlaunch_grid_pre_population(render_data);\n\n\t\tm_render_window->set_ImGui_status_text(\"ReGIR Supersampling fill...\");\n\t\tlaunch_supersampling_fill(render_data);\n\n\t\tm_render_window->set_ImGui_status_text(\"ReGIR Pre-integration...\");\n\t\tlaunch_pre_integration(render_data);\n\n\t\tOROCHI_CHECK_ERROR(oroLaunchHostFunc(m_renderer->get_main_stream(), callback_reset_imgui_status_text, m_render_window));\n\t}\n\n\tbool full_grid_fill_needed = false;\n\tbool rehashed = rehash(render_data);\n\tif (rehashed)\n\t{\n\t\t// A rehashing with supersampling enabled will empty the supersampling grid so we need to fill it again\n\t\tm_render_window->set_ImGui_status_text(\"ReGIR Supersampling fill...\");\n\t\tlaunch_supersampling_fill(render_data);\n\n\t\t// Same with the pre 
integration factors of the grid cells\n\t\tm_render_window->set_ImGui_status_text(\"ReGIR Pre-integration...\");\n\t\tlaunch_pre_integration(render_data);\n\n\t\tOROCHI_CHECK_ERROR(oroLaunchHostFunc(m_renderer->get_main_stream(), callback_reset_imgui_status_text, m_render_window));\n\n\t\t// If we rehashed the grid, we're going to need a full grid re-fill for this frame\n\t\tfull_grid_fill_needed = true;\n\t}\n\n\trender_data.render_settings.regir_settings.supersampling.correl_reduction_current_grid = m_hash_grid_storage.get_supersampling_current_frame();\n\trender_data.render_settings.regir_settings.supersampling.correl_frames_available = m_hash_grid_storage.get_supersampling_frames_available();\n\n\t// If this is the first sample, we have no frame before that that could fill the grid asynchronously\n\t// so we're going to need to fully fill the grid now\n\tfull_grid_fill_needed |= render_data.render_settings.sample_number == 0;\n\tfull_grid_fill_needed |= !render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse;\n\tfull_grid_fill_needed |= !render_data.render_settings.regir_settings.do_asynchronous_compute;\n\tif (full_grid_fill_needed)\n\t\t// At each frame, launch_async_grid_fill() is called which fills the grid asynchronously\n\t\t// (at the same time as the path tracing kernels execute). 
This means that when we get here,\n\t\t// the grid is already filled and we only need to launch spatial reuse.\n\t\t//\n\t\t// But the grid can somehow be resized (rehashed), which means that all the content of the grid\n\t\t// is cleared and so all that was filled asynchronously is lost so we need a full grid refill here\n\t\tlaunch_sync_grid_fill(render_data, rehashed);\n\n\t// Positioning the actual spatial reuse output buffers\n\trender_data.render_settings.regir_settings.actual_spatial_output_buffers_primary_hits = m_last_spatial_reuse_output_buffer_primary_hits;\n\trender_data.render_settings.regir_settings.actual_spatial_output_buffers_secondary_hits = m_last_spatial_reuse_output_buffer_secondary_hits;\n\n\t// Launching an synchronous grid fill such that the grid fill for *next* frame can execute\n\t// while the path tracing kernels are running.\n\t//\n\t// This is not a concurrency issue with the path tracing kernels because the path tracing kernels\n\t// only read from the spatial reuse output buffers, and we're only filling the grid fill output buffers\n\t// here. The spatial reuse buffers are untouched.\n\t//\n\t// If spatial reuse is disabled, then this asynchronous grid fill is indeed a race concurrency with the\n\t// path tracing kernels (and that's why the async grid fill isn't run if spatial reuse is disabled. 
The check\n\t// for that is in the async grid fill function).\n\tlaunch_async_grid_fill(render_data);\n\n\treturn true;\n}\n\nvoid ReGIRRenderPass::launch_sync_grid_fill(HIPRTRenderData& render_data, bool bypass_skip_frame)\n{\n\t// Execute a full grid fill synchronously (from the point of view of the GPU\n\t// CUDA/HIP streams, this is still asynchronous for the CPU: not blocking for the CPU)\n\tlaunch_light_presampling(render_data, m_renderer->get_main_stream());\n\n\tbool skip_frame_primary_hits = render_data.render_settings.sample_number % (render_data.render_settings.regir_settings.frame_skip_primary_hit_grid + 1) != 0;\n\tif (m_number_of_cells_alive_primary_hits > 0 && (!skip_frame_primary_hits || bypass_skip_frame))\n\t{\n\t\tlaunch_grid_fill_temporal_reuse(render_data, true, false, m_renderer->get_main_stream());\n\t\tm_last_spatial_reuse_output_buffer_primary_hits = launch_spatial_reuse(render_data, true, false, m_renderer->get_main_stream());\n\t}\n\n\tbool skip_frame_secondary_hits = render_data.render_settings.sample_number % (render_data.render_settings.regir_settings.frame_skip_secondary_hit_grid + 1) != 0;\n\tif (m_number_of_cells_alive_secondary_hits > 0 && (!skip_frame_secondary_hits || bypass_skip_frame))\n\t{\n\t\tlaunch_grid_fill_temporal_reuse(render_data, false, false, m_renderer->get_main_stream());\n\t\tm_last_spatial_reuse_output_buffer_secondary_hits = launch_spatial_reuse(render_data, false, false, m_renderer->get_main_stream());\n\t}\n}\n\nvoid ReGIRRenderPass::launch_async_grid_fill(HIPRTRenderData& render_data)\n{\n\tif (!render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t\t// Disabling async compute if we do not have spatial reuse enabled just for implementation\n\t\t// simplicity\n\t\treturn;\n\telse if (!render_data.render_settings.regir_settings.do_asynchronous_compute)\n\t\t// We don't want async compute\n\t\treturn;\n\n\t// TODO do this with events instead of CPU blocking stream 
synchronizations\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_renderer->get_main_stream()));\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_pre_integration_async_stream));\n\n\t// We're going to launch the grid fill for the next frame now on an async stream such\n\t// that we can fill the grid of the *next* frame while the path tracing of the *current* frame\n\t// is running\n\n\tlaunch_light_presampling(render_data, m_grid_fill_async_stream_primary_hits);\n\n\t// 2 iterations for first hits and secondary hits\n\tfor (int i = 0; i < 2; i++)\n\t{\n\t\tbool primary_hit = i == 0;\n\n\t\toroStream_t async_stream = m_grid_fill_async_stream_primary_hits;\n\n\t\t// Checking if the *next* frame (sample number + 1) needs a grid fill\n\t\tint frame_skip = primary_hit ? render_data.render_settings.regir_settings.frame_skip_primary_hit_grid : render_data.render_settings.regir_settings.frame_skip_secondary_hit_grid;\n\t\tbool skip_frame = (render_data.render_settings.sample_number + 1) % (frame_skip + 1) != 0;\n\t\tunsigned int number_of_cells_alive = primary_hit ? 
m_number_of_cells_alive_primary_hits : m_number_of_cells_alive_secondary_hits;\n\t\tif (number_of_cells_alive > 0 && !skip_frame)\n\t\t{\n\t\t\t// We need to be careful about which buffer we're going to use to store the async grid fill\n\t\t\t// results because we don't to override the spatial reuse buffer that the path tracing kernels\n\t\t\t// are actively using for shading\n\t\t\t//\n\t\t\t// We have two buffers that may be read into by the path tracing kernels: either they are going to\n\t\t\t// read from the 'initial grid fill buffers' or the 'spatial output buffer'\n\t\t\t//\n\t\t\t// With multiple spatial reuse passes however, the spatial reuse pass may store its final reuse pass output\n\t\t\t// into the 'initial grid fill buffers', depending on whether we have an odd or even number of spatial reuse\n\t\t\t// passes.\n\t\t\t//\n\t\t\t// In any case, what we want to do is simple: the async compute should fill in the buffer that is not being used\n\t\t\t// by the path tracing kernels which is the buffer that the spatial reuse passes did not fill at the end\n\t\t\tReGIRHashGridSoADevice buffer_used_by_pt_kernels = primary_hit ? 
render_data.render_settings.regir_settings.actual_spatial_output_buffers_primary_hits : render_data.render_settings.regir_settings.actual_spatial_output_buffers_secondary_hits;;\n\t\t\tReGIRHashGridSoADevice output_reservoirs_async_grid_fill = get_non_equal_buffer(\n\t\t\t\trender_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit), \n\t\t\t\trender_data.render_settings.regir_settings.get_raw_spatial_output_reservoirs_grid(primary_hit),\n\t\t\t\tbuffer_used_by_pt_kernels);\n\n\t\t\tlaunch_grid_fill_temporal_reuse(render_data, output_reservoirs_async_grid_fill, primary_hit, false, async_stream);\n\n\t\t\t// Same for the sptial reuse as for the grid fill: we're going to use the buffer that is not being used by the path tracing kernels\n\t\t\t// and that is not the buffer that is input to the spatial reuse (because we don't want to store into the buffer which we're reading\n\t\t\t// from in the spatial reuse pass, that would be a race condition)\n\t\t\tReGIRHashGridSoADevice output_reservoirs_async_spatial_reuse = get_non_equal_buffer(\n\t\t\t\trender_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit),\n\t\t\t\trender_data.render_settings.regir_settings.get_raw_spatial_output_reservoirs_grid(primary_hit),\n\t\t\t\tm_hash_grid_storage.get_async_compute_staging_buffer_device(primary_hit),\n\n\t\t\t\tbuffer_used_by_pt_kernels,\n\t\t\t\toutput_reservoirs_async_grid_fill);\n\n\t\t\tReGIRHashGridSoADevice& last_spatial_output_buffer = primary_hit ? 
m_last_spatial_reuse_output_buffer_primary_hits : m_last_spatial_reuse_output_buffer_secondary_hits;\n\t\t\tlast_spatial_output_buffer = launch_spatial_reuse(render_data, output_reservoirs_async_grid_fill, output_reservoirs_async_spatial_reuse, primary_hit, false, async_stream);\n\t\t}\n\t}\n}\n\nvoid ReGIRRenderPass::launch_grid_pre_population(HIPRTRenderData& render_data)\n{\n\tbool has_rehashed = false;\n\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tdo\n\t{\n\t\tupdate_all_cell_alive_count(render_data);\n\n\t\tvoid* launch_args[] = { &render_data };\n\n\t\t// Only launching / 4 in each dimension because we don't need a super high precision for the grid pre-population.\n\t\t// \n\t\t// We just need some rays bouncing around the scene but that's it\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_PRE_POPULATE]->launch_synchronous(\n\t\t\tKernelBlockWidthHeight, KernelBlockWidthHeight,\n\t\t\tm_renderer->m_render_resolution.x / ReGIR_GridPrepopulationResolutionDownscale, m_renderer->m_render_resolution.y / ReGIR_GridPrepopulationResolutionDownscale,\n\t\t\tlaunch_args);\n\n\t\thas_rehashed = rehash(render_data);\n\t} while (has_rehashed);\n}\n\nbool ReGIRRenderPass::rehash(HIPRTRenderData& render_data)\n{\n\tupdate_all_cell_alive_count(render_data);\n\n\tif (m_hash_grid_storage.try_rehash(render_data))\n\t{\n\t\tm_hash_grid_storage.to_device(m_renderer->get_render_data());\n\t\t\n\t\t// We also want the local 'render_data' parameter here to be updated such\n\t\t// that the grid fill and spatial reuse passes can use the rehashed (and resized) grid\n\t\tm_hash_grid_storage.to_device(render_data);\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid ReGIRRenderPass::launch_light_presampling(HIPRTRenderData& render_data, oroStream_t stream)\n{\n\tif (!render_data.render_settings.regir_settings.do_light_presampling)\n\t\treturn;\n\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tunsigned int nb_threads = 
render_data.render_settings.regir_settings.presampled_lights.get_presampled_light_count();\n\n\tvoid* launch_args[] = { &render_data };\n\n\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_LIGHT_PRESAMPLING]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n}\n\nvoid ReGIRRenderPass::launch_grid_fill_temporal_reuse(HIPRTRenderData& render_data, ReGIRHashGridSoADevice grid_fill_output_reservoirs_grid, bool primary_hit, bool for_pre_integration, oroStream_t stream)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tunsigned int number_of_cells_alive = primary_hit ? m_number_of_cells_alive_primary_hits : m_number_of_cells_alive_secondary_hits;\n\tunsigned int reservoirs_per_cell = render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(primary_hit);\n\n\tvoid* launch_args[] = { &render_data, &grid_fill_output_reservoirs_grid, &number_of_cells_alive, &primary_hit };\n\n\t// Only launching a maximum of render_resolution.x * render_resolution.y thread at a time.\n\t// \n\t// Why? Because with visibility reuse, we're shooting rays from the kernel.\n\t// Shooting rays uses the global stack buffer (and shared mem) for the BVH traversal and the global\n\t// stack buffer is limited in size (it is sized by the number of pixels on the screen since\n\t// it's usually used for tracing one ray per pixel). 
So we need to limit the number of rays\n\t// that are launched per each kernel here\n\t//\n\t// So we're launching the kernel with a maximum of render_resolution.x * render_resolution.y threads so that\n\t// we don't overrun the global BVH traversal stack buffer\n\t//\n\t// To make sure one kernel launch still covers all the reservoirs that we have to cover, the kernel code\n\t// uses a while loop such that a single thread potentially computes more than 1 reservoir\n\tunsigned int nb_threads = hippt::min(number_of_cells_alive * reservoirs_per_cell, (unsigned int)(render_data.render_settings.render_resolution.x * render_data.render_settings.render_resolution.y));\n\tif (nb_threads == 0)\n\t\t// No grid cell alive to fill\n\t\treturn;\n\t\n\tif (for_pre_integration)\n\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\telse\n\t{\n\t\tif (primary_hit)\n\t\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\t\telse\n\t\t\tm_kernels[ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\t}\n}\n\nvoid ReGIRRenderPass::launch_grid_fill_temporal_reuse(HIPRTRenderData& render_data, bool primary_hit, bool for_pre_integration, oroStream_t stream)\n{\n\tReGIRHashGridSoADevice output_reservoirs_grid = render_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit);\n\n\tlaunch_grid_fill_temporal_reuse(render_data, output_reservoirs_grid, primary_hit, for_pre_integration, stream);\n}\n\nReGIRHashGridSoADevice ReGIRRenderPass::launch_spatial_reuse(HIPRTRenderData& render_data, ReGIRHashGridSoADevice first_input_reservoirs, ReGIRHashGridSoADevice first_output_reservoirs, bool primary_hit, bool for_pre_integration, oroStream_t stream)\n{\n\tif 
(!render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t\treturn first_input_reservoirs;\n\n\tReGIRHashCellDataSoADevice output_reservoirs_cell_data = render_data.render_settings.regir_settings.get_hash_cell_data_soa(primary_hit);\n\t\n\tunsigned int number_of_cells_alive = primary_hit ? m_number_of_cells_alive_primary_hits : m_number_of_cells_alive_secondary_hits;\n\tunsigned int reservoirs_per_cell = render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(primary_hit);\n\t\n\tfor (int i = 0; i < render_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_count; i++)\n\t{\n\t\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\t\trender_data.render_settings.regir_settings.spatial_reuse.spatial_reuse_pass_index = i;\n\t\t\n\t\tvoid* launch_args[] = { &render_data, &first_input_reservoirs, &first_output_reservoirs, &output_reservoirs_cell_data, &number_of_cells_alive, &primary_hit };\n\n\t\t// Same reason for nb_threads here as explained in the GridFill kernel launch\n\t\tunsigned int nb_threads = hippt::min(number_of_cells_alive * reservoirs_per_cell, (unsigned int)(render_data.render_settings.render_resolution.x * render_data.render_settings.render_resolution.y));\n\t\tif (nb_threads == 0)\n\t\t\t// No grid cell alive to spatially reuse\n\t\t\treturn ReGIRHashGridSoADevice();\n\n\t\tif (for_pre_integration)\n\t\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\t\telse\n\t\t{\n\t\t\tif (primary_hit)\n\t\t\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\t\t\telse\n\t\t\t\tm_kernels[ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID]->launch_asynchronous(64, 1, nb_threads, 1, launch_args, stream);\n\t\t}\n\n\t\t// Swapping the input and output for the next spatial reuse pass (if 
any)\n\t\tstd::swap(first_input_reservoirs, first_output_reservoirs);\n\t}\n\n\t// Returning the reservoirs into which the spatial reuse pass last output the result\n\t//\n\t// This is the 'input' buffer and not 'output' because of the std::swap that happens on the last iteration\n\treturn first_input_reservoirs;\n}\n\nReGIRHashGridSoADevice ReGIRRenderPass::launch_spatial_reuse(HIPRTRenderData& render_data, bool primary_hit, bool for_pre_integration, oroStream_t stream)\n{\n\tReGIRHashGridSoADevice input_reservoirs = render_data.render_settings.regir_settings.get_initial_reservoirs_grid(primary_hit);\n\tReGIRHashGridSoADevice output_reservoirs = render_data.render_settings.regir_settings.get_raw_spatial_output_reservoirs_grid(primary_hit);\n\n\treturn launch_spatial_reuse(render_data, input_reservoirs, output_reservoirs, primary_hit, for_pre_integration, stream);\n}\n\nvoid ReGIRRenderPass::launch_supersampling_fill(HIPRTRenderData& render_data)\n{\n\tif (!render_data.render_settings.regir_settings.supersampling.do_correlation_reduction)\n\t\treturn;\n\n\tunsigned int seed_backup = render_data.random_number;\n\n\tfor (int i = 0; i < render_data.render_settings.regir_settings.supersampling.correlation_reduction_factor; i++)\n\t{\n\t\trender_data.random_number = m_local_rng.xorshift32();\n\n\t\tlaunch_light_presampling(render_data, m_renderer->get_main_stream());\n\t\tlaunch_grid_fill_temporal_reuse(render_data, true, false, m_renderer->get_main_stream());\n\t\tReGIRHashGridSoADevice spatial_output = launch_spatial_reuse(render_data, true, false, m_renderer->get_main_stream());\n\t\tlaunch_supersampling_copy(render_data, spatial_output);\n\n\t\tm_hash_grid_storage.increment_supersampling_counters(render_data);\n\n\t\trender_data.render_settings.regir_settings.supersampling.correl_reduction_current_grid = m_hash_grid_storage.get_supersampling_current_frame();\n\t\trender_data.render_settings.regir_settings.supersampling.correl_frames_available = 
m_hash_grid_storage.get_supersampling_frames_available();\n\t}\n\n\trender_data.random_number = seed_backup;\n}\n\nvoid ReGIRRenderPass::launch_supersampling_copy(HIPRTRenderData& render_data, ReGIRHashGridSoADevice input_reservoirs_to_copy)\n{\n\tif (!render_data.render_settings.regir_settings.supersampling.do_correlation_reduction)\n\t\treturn;\n\n\tvoid* launch_args[] = { &render_data, &input_reservoirs_to_copy };\n\n\tunsigned int nb_threads =  m_number_of_cells_alive_primary_hits * render_data.render_settings.regir_settings.get_number_of_reservoirs_per_cell(true);\n\tif (nb_threads == 0)\n\t\t// No cell alive to copy\n\t\treturn;\n\n\tm_kernels[ReGIRRenderPass::REGIR_SUPERSAMPLING_COPY_KERNEL_ID]->launch_asynchronous(64, 1,nb_threads, 1, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReGIRRenderPass::launch_supersampling_copy(HIPRTRenderData& render_data)\n{\n\tReGIRHashGridSoADevice to_copy;\n\tif (render_data.render_settings.regir_settings.spatial_reuse.do_spatial_reuse)\n\t\tto_copy = render_data.render_settings.regir_settings.get_actual_spatial_output_reservoirs_grid(true);\n    else\n        to_copy = render_data.render_settings.regir_settings.get_initial_reservoirs_grid(true);\n\n\tlaunch_supersampling_copy(render_data, to_copy);\n}\n\nvoid ReGIRRenderPass::launch_pre_integration(HIPRTRenderData& render_data)\n{\n\tupdate_all_cell_alive_count(render_data);\n\n\t// --------------- Record the start of the overall pre integration process\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_event_pre_integration_duration_start, m_renderer->get_main_stream()));\n\t// --------------- Record the start of the overall pre integration process\n\n\n\n\n\n\t// Adjusting the number of samples per reservoir just for the pre-integration pass.\n\t// TODO: is this really integrating correctly? 
If we do not have the same number of samples per reservoir during pre-integration, are we really getting the correct PDF?\n\tunsigned int backup = render_data.render_settings.regir_settings.grid_fill_settings_primary_hits.light_sample_count_per_cell_reservoir;\n\trender_data.render_settings.regir_settings.grid_fill_settings_primary_hits.light_sample_count_per_cell_reservoir = render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_SAMPLE_COUNT_PER_RESERVOIR;\n\trender_data.render_settings.regir_settings.grid_fill_settings_secondary_hits.light_sample_count_per_cell_reservoir = render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_SAMPLE_COUNT_PER_RESERVOIR;\n\n\t// Clearing the pre integration buffer before accumulating new pre integration data into them\n\tm_hash_grid_storage.clear_pre_integrated_RIS_integral_factors(true);\n\tif (m_number_of_cells_alive_secondary_hits > 0)\n\t\tm_hash_grid_storage.clear_pre_integrated_RIS_integral_factors(false);\n\n\t// Important to launch the pre integration for the secondary hits first\n\t// so that we can then \n\tlaunch_pre_integration_internal(render_data, true, m_pre_integration_async_stream);\n\t// The primary hit pre-integration is going to happen on the secondary stream so\n\t// for everything to be in order we're going to have the main stream wait for the completion\n\t// of the first hit pre-integration.\n\t//\n\t// Recording an event after the first pre-integration is over\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_oro_event, m_pre_integration_async_stream));\n\n\t// Launching the pre integration for the secondary hits on another stream such that the pre integration\n\t// for primary and secondary hits can execute in parallel\n\tlaunch_pre_integration_internal(render_data, false, m_renderer->get_main_stream());\n\n\t// Waiting to be sure that the pre-integration for the first hits is over before continuing\n\tOROCHI_CHECK_ERROR(oroStreamWaitEvent(m_renderer->get_main_stream(), m_oro_event, /* oroEventWaitDefault */ 
0));\n\n\n\n\n\t\n\t// --------------- Record the end of the overall pre integration process\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_event_pre_integration_duration_stop, m_renderer->get_main_stream()));\n\t// --------------- Record the end of the overall pre integration process\n\n\trender_data.render_settings.regir_settings.grid_fill_settings_primary_hits.light_sample_count_per_cell_reservoir = backup;\n\trender_data.render_settings.regir_settings.grid_fill_settings_secondary_hits.light_sample_count_per_cell_reservoir = backup;\n\n\tm_pre_integration_executed = true;\n}\n\nvoid ReGIRRenderPass::launch_pre_integration_internal(HIPRTRenderData& render_data, bool primary_hit, oroStream_t stream)\n{\n\tunsigned int seed_backup = render_data.random_number;\n\tunsigned int nb_cells_alive = primary_hit ? m_number_of_cells_alive_primary_hits : m_number_of_cells_alive_secondary_hits;\n\tunsigned int nb_threads = hippt::min(nb_cells_alive, (unsigned int)(render_data.render_settings.render_resolution.x * render_data.render_settings.render_resolution.y));\n\n\tif (nb_cells_alive == 0)\n\t\treturn;\n\n\tfor (int i = 0; i < render_data.render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS; i++)\n\t{\n\t\trender_data.random_number = m_local_rng.xorshift32();\n\n\t\tlaunch_light_presampling(render_data, stream);\n\t\tlaunch_grid_fill_temporal_reuse(render_data, primary_hit, true, stream);\n\t\tlaunch_spatial_reuse(render_data, primary_hit, true, stream);\n\t}\n\n\trender_data.random_number = seed_backup;\n}\n\nvoid ReGIRRenderPass::launch_rehashing_kernel(HIPRTRenderData& render_data, bool primary_hit, ReGIRHashGridSoADevice& new_hash_grid_soa, ReGIRHashCellDataSoADevice& new_hash_cell_data)\n{\n\tif (render_data.render_settings.nb_bounces == 0 && !primary_hit)\n\t\t// Rehashing for the secondary hits but we don't have secondary hit grid cells because the renderer is doing 0 bounces\n\t\treturn;\n\n\tunsigned int* cell_alive_list_ptr = 
m_hash_grid_storage.get_hash_cell_data_soa(primary_hit).m_hash_cell_data.template get_buffer_data_ptr<ReGIRHashCellDataSoAHostBuffers::REGIR_HASH_CELLS_ALIVE_LIST>();\n\tunsigned int old_cell_count = m_hash_grid_storage.get_hash_cell_data_soa(primary_hit).size();\n\tunsigned int old_cell_alive_count = primary_hit ? m_number_of_cells_alive_primary_hits : m_number_of_cells_alive_secondary_hits;\n\n\t// The old number of cells alive is the number of cells that we're going to have to rehash\n\t\n\tvoid* launch_args[] = { \n\t\t&render_data.current_camera,\n\t\t\n\t\t&render_data.render_settings.regir_settings.hash_grid,\n\t\t&new_hash_grid_soa, &new_hash_cell_data,\n\t\t\n\t\t&m_hash_grid_storage.get_hash_cell_data_device_soa(render_data.render_settings.regir_settings, primary_hit),\n\t\t&cell_alive_list_ptr, // old cell alive list\n\t\t&old_cell_alive_count,\n\n\t\t&primary_hit\n\t};\n\t\n\tm_kernels[ReGIRRenderPass::REGIR_REHASH_KERNEL_ID]->launch_synchronous(64, 1, old_cell_alive_count, 1, launch_args);\n}\n\nvoid ReGIRRenderPass::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn;\n\n\tlaunch_supersampling_copy(render_data);\n\n\tm_hash_grid_storage.post_sample_update_async(render_data);\n}\n\nvoid ReGIRRenderPass::update_render_data()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (is_render_pass_used())\n\t\tm_hash_grid_storage.to_device(render_data);\n\telse\n\t{\n\t\trender_data.render_settings.regir_settings.initial_reservoirs_primary_hits_grid = ReGIRHashGridSoADevice();\n\t\trender_data.render_settings.regir_settings.initial_reservoirs_secondary_hits_grid = ReGIRHashGridSoADevice();\n\t\trender_data.render_settings.regir_settings.spatial_output_primary_hits_grid = ReGIRHashGridSoADevice();\n\t\trender_data.render_settings.regir_settings.spatial_output_secondary_hits_grid = 
ReGIRHashGridSoADevice();\n\n\t\trender_data.render_settings.regir_settings.hash_cell_data_primary_hits = ReGIRHashCellDataSoADevice();\n\t\trender_data.render_settings.regir_settings.hash_cell_data_secondary_hits = ReGIRHashCellDataSoADevice();\n\t}\n}\n\nvoid ReGIRRenderPass::synchronize_async_compute()\n{\n\t// Synchronizing and waiting for the asynchronous grid fill launched last frame (for this frame's grid)\n\t// to finish\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_grid_fill_async_stream_primary_hits));\n\tOROCHI_CHECK_ERROR(oroStreamSynchronize(m_grid_fill_async_stream_secondary_hits));\n}\n\nvoid ReGIRRenderPass::compute_render_times()\n{\n\tif (!is_render_pass_used())\n\t\t// No times to compute if the render pass is disabled / not being used\n\t\treturn;\n\n\t// The default implementation iterates over all kernels and adds their time to the\n\t// render pass times of the renderer\n\tstd::unordered_map<std::string, float>& render_pass_times = m_renderer->get_render_pass_times();\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t{\n\t\tfloat execution_time = m_kernels[name_to_kernel.first]->compute_execution_time();\n\n\t\t// Scaling the execution time based on the frame skip settings because if skipping 1 frame\n\t\t// for example, the grid fill and spatial reuse kernels essentially run every 2 frames so\n\t\t// they take twice as less time to run overall\n\t\tconst std::string& kernel_name = name_to_kernel.first;\n\t\tif (kernel_name == ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID || kernel_name == ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID)\n\t\t\texecution_time /= m_renderer->get_render_data().render_settings.regir_settings.frame_skip_primary_hit_grid + 1;\n\t\telse if (kernel_name == ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID || kernel_name == ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID)\n\t\t\texecution_time /= 
m_renderer->get_render_data().render_settings.regir_settings.frame_skip_secondary_hit_grid + 1;\n\t\telse if (kernel_name == ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID && m_pre_integration_executed)\n\t\t{\n\t\t\t// Special case for the pre integration where we want to take into account the whole time\n\t\t\t// including the grid fill / spatial reuse passes of the pre integration and all the\n\t\t\t// pre integration passes at the same time.\n\t\t\t//\n\t\t\t// If we didn't override that behavior, the pre integration time would just be the time that the\n\t\t\t// last pre integration kernel took which is clearly inaccurate\n\n\t\t\tfloat duration;\n\t\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&duration, m_event_pre_integration_duration_start, m_event_pre_integration_duration_stop));\n\t\t\trender_pass_times[name_to_kernel.first] = duration;\n\n\t\t\tcontinue;\n\t\t}\n\n\t\trender_pass_times[name_to_kernel.first] = execution_time;\n\t}\n}\n\nvoid ReGIRRenderPass::update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics)\n{\n\tif (!is_render_pass_used())\n\t\t// No metrics to update if the render pass is disabled / not being used\n\t\treturn;\n\n\t// Add the render pass times computed by 'compute_render_times()' (which was called before\n\t// 'update_perf_metrics') into the performance metrics computer\n\tstd::unordered_map<std::string, float>& render_pass_times = m_renderer->get_render_pass_times();\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t{\n\t\tfloat execution_time = render_pass_times[name_to_kernel.first];\n\n\t\tconst std::string& kernel_name = name_to_kernel.first;\n\t\tif (kernel_name == ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID || kernel_name == ReGIRRenderPass::REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID)\n\t\t\texecution_time /= m_renderer->get_render_data().render_settings.regir_settings.frame_skip_primary_hit_grid + 1;\n\t\telse if (kernel_name == 
ReGIRRenderPass::REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID || kernel_name == ReGIRRenderPass::REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID)\n\t\t\texecution_time /= m_renderer->get_render_data().render_settings.regir_settings.frame_skip_secondary_hit_grid + 1;\n\t\telse if (kernel_name == ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID && m_pre_integration_executed)\n\t\t{\n\t\t\t// Special case for the pre integration where we want to take into account the whole time\n\t\t\t// including the grid fill / spatial reuse passes of the pre integration and all the\n\t\t\t// pre integration passes at the same time.\n\t\t\t//\n\t\t\t// If we didn't override that behavior, the pre integration time would just be the time that the\n\t\t\t// last pre integration kernel took which is clearly inaccurate\n\n\t\t\tfloat duration;\n\t\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&duration, m_event_pre_integration_duration_start, m_event_pre_integration_duration_stop));\n\t\t\tperf_metrics->add_value(name_to_kernel.first, duration);\n\n\t\t\tcontinue;\n\t\t}\n\n\t\tperf_metrics->add_value(name_to_kernel.first, execution_time);\n\t}\n}\n\nfloat ReGIRRenderPass::get_full_frame_time()\n{\n\tfloat sum = 0.0f;\n\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t{\n\t\tif (name_to_kernel.first == ReGIRRenderPass::REGIR_PRE_INTEGRATION_KERNEL_ID ||\n\t\t\tname_to_kernel.first == ReGIRRenderPass::REGIR_GRID_PRE_POPULATE ||\n\t\t\tname_to_kernel.first == ReGIRRenderPass::REGIR_REHASH_KERNEL_ID)\n\t\t\t// Pre integration and pre population passes are a bit exceptional\n\t\t\t// so we don't want to include them in the frame time\n\t\t\tcontinue;\n\n\t\tsum += name_to_kernel.second->get_last_execution_time();\n\t}\n\n\treturn sum;\n}\n\nvoid ReGIRRenderPass::reset(bool reset_by_camera_movement)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\t// We wouldn't want to reset the buffers while async compute is filling them\n\t// so synchronization here 
\n\tsynchronize_async_compute();\n\n\tif (m_hash_grid_storage.get_byte_size() > 0)\n\t\tm_hash_grid_storage.reset();\n}\n\nbool ReGIRRenderPass::is_render_pass_used() const\n{\n\treturn m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR;\n}\n\nfloat ReGIRRenderPass::get_VRAM_usage() const\n{\n\treturn (m_hash_grid_storage.get_byte_size()) / 1000000.0f;\n}\n\nunsigned int ReGIRRenderPass::get_number_of_cells_alive(bool primary_hit) const\n{\n\treturn primary_hit ? m_number_of_cells_alive_primary_hits  : m_number_of_cells_alive_secondary_hits;\n}\n\nunsigned int ReGIRRenderPass::get_total_number_of_cells_alive(bool primary_hit) const\n{\n\treturn m_hash_grid_storage.get_total_number_of_cells(primary_hit);\n}\n\nGPURenderer* ReGIRRenderPass::get_renderer()\n{\n\treturn m_renderer;\n}\n\nvoid ReGIRRenderPass::update_all_cell_alive_count(HIPRTRenderData& render_data)\n{\n\tm_hash_grid_storage.get_hash_cell_data_soa(true).m_grid_cells_alive_count.download_data_into(m_grid_cells_alive_count_staging_host_pinned_buffer.get_host_pinned_pointer());\n\tm_number_of_cells_alive_primary_hits = m_grid_cells_alive_count_staging_host_pinned_buffer.get_host_pinned_pointer()[0];\n\n\tif (render_data.render_settings.nb_bounces > 0)\n\t{\n\t\tm_hash_grid_storage.get_hash_cell_data_soa(false).m_grid_cells_alive_count.download_data_into(m_grid_cells_alive_count_staging_host_pinned_buffer.get_host_pinned_pointer());\n\t\tm_number_of_cells_alive_secondary_hits = m_grid_cells_alive_count_staging_host_pinned_buffer.get_host_pinned_pointer()[0];\n\t}\n\telse\n\t\t// No bounces = no secondary hit cells\n\t\tm_number_of_cells_alive_secondary_hits = 0;\n}\n\nfloat ReGIRRenderPass::get_alive_cells_ratio(bool primary_hit) const\n{\n\tunsigned int total_number_of_cells = m_hash_grid_storage.get_total_number_of_cells(primary_hit);\n\n\tif (total_number_of_cells == 0)\n\t\treturn 0.0f;\n\n\treturn 
get_number_of_cells_alive(primary_hit) / static_cast<float>(total_number_of_cells);\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReGIRRenderPass.h",
    "content": "/**\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef REGIR_RENDER_PASS_H\n#define REGIR_RENDER_PASS_H\n\n#include \"Renderer/RenderPasses/RenderPass.h\"\n#include \"Renderer/RenderPasses/ReGIRHashGridStorage.h\"\n#include \"Renderer/CPUGPUCommonDataStructures/ReGIRHashCellDataSoAHost.h\"\n\nclass GPURenderer;\n\nclass ReGIRRenderPass: public RenderPass\n{\npublic:\n\tstatic const std::string REGIR_GRID_PRE_POPULATE;\n\tstatic const std::string REGIR_GRID_FILL_LIGHT_PRESAMPLING;\n\tstatic const std::string REGIR_GRID_FILL_TEMPORAL_REUSE_FIRST_HITS_KERNEL_ID;\n\tstatic const std::string REGIR_GRID_FILL_TEMPORAL_REUSE_SECONDARY_HITS_KERNEL_ID;\n\tstatic const std::string REGIR_SPATIAL_REUSE_FIRST_HITS_KERNEL_ID;\n\tstatic const std::string REGIR_SPATIAL_REUSE_SECONDARY_HITS_KERNEL_ID;\n\tstatic const std::string REGIR_PRE_INTEGRATION_KERNEL_ID;\n\tstatic const std::string REGIR_GRID_FILL_TEMPORAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID;\n\tstatic const std::string REGIR_SPATIAL_REUSE_FOR_PRE_INTEGRATION_KERNEL_ID;\n\tstatic const std::string REGIR_REHASH_KERNEL_ID;\n\tstatic const std::string REGIR_SUPERSAMPLING_COPY_KERNEL_ID;\n\n\tstatic const std::string REGIR_RENDER_PASS_NAME;\n\n\t/**\n\t * This map contains constants that are the name of the main function of the kernels, their entry points.\n\t * They are used when compiling the kernels.\n\t *\n\t * This means that if you define your camera ray kernel main function as:\n\t *\n\t * GLOBAL_KERNEL_SIGNATURE(void) CameraRays(HIPRTRenderData render_data, int2 res)\n\t *\n\t * Then KERNEL_FUNCTION_NAMES[CAMERA_RAYS_KERNEL_ID] = \"CameraRays\"\n\t */\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FUNCTION_NAMES;\n\n\t/**\n\t * Same as 'KERNELfUNCTION_NAMES' but for kernel files\n\t */\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FILES;\n\n\tReGIRRenderPass() 
{}\n\tReGIRRenderPass(GPURenderer* renderer);\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override {};\n\n\tvirtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) override;\n\tvirtual bool pre_render_update(float delta_time) override;\n\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tvoid launch_sync_grid_fill(HIPRTRenderData& render_data, bool bypass_skip_frame);\n\tvoid launch_async_grid_fill(HIPRTRenderData& render_data);\n\t/**\n\t * The prepass in ReGIR is used to shoot rays in every directions from the G-Buffer to discover how many grid cells\n\t * are going to be needed for the ReGIR grid.\n\t */\n\tvoid launch_grid_pre_population(HIPRTRenderData& render_data);\n\tbool rehash(HIPRTRenderData& render_data);\n\n\tvoid launch_light_presampling(HIPRTRenderData& render_data, oroStream_t stream);\n\tvoid launch_grid_fill_temporal_reuse(HIPRTRenderData& render_data, ReGIRHashGridSoADevice grid_fill_output_reservoirs_grid, bool primary_hit, bool for_pre_integration, oroStream_t stream);\n\tvoid launch_grid_fill_temporal_reuse(HIPRTRenderData& render_data, bool primary_hit, bool for_pre_integration, oroStream_t stream);\n\t/**\n\t * Returns the hash grid buffer into which the spatial reuse output the result\n\t */\n\tReGIRHashGridSoADevice launch_spatial_reuse(HIPRTRenderData& render_data, ReGIRHashGridSoADevice first_input_reservoirs, ReGIRHashGridSoADevice first_output_reservoirs, bool primary_hit, bool for_pre_integration, oroStream_t stream);\n\tReGIRHashGridSoADevice launch_spatial_reuse(HIPRTRenderData& render_data, bool primary_hit, bool for_pre_integration, oroStream_t stream);\n\tvoid launch_supersampling_fill(HIPRTRenderData& render_data);\n\tvoid launch_supersampling_copy(HIPRTRenderData& render_data, 
ReGIRHashGridSoADevice input_reservoirs_to_copy);\n\tvoid launch_supersampling_copy(HIPRTRenderData& render_data);\n\tvoid launch_pre_integration(HIPRTRenderData& render_data);\n\tvoid launch_pre_integration_internal(HIPRTRenderData& render_data, bool primary_hit, oroStream_t stream);\n\tvoid launch_rehashing_kernel(HIPRTRenderData& render_data, bool primary_hit, ReGIRHashGridSoADevice& new_hash_grid_soa, ReGIRHashCellDataSoADevice& new_hash_cell_data);\n\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void update_render_data() override;\n\n\tvoid synchronize_async_compute();\n\n\t/**\n\t * These 2 functions are overridden just to allow a custom handling of the 'frame skip' feature\n\t * such that a frame skip of 3 really reflects that the grid fill pass takes 3 times less time\n\t */\n\tvirtual void compute_render_times() override;\n\tvirtual void update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics) override;\n\tvirtual float get_full_frame_time() override;\n\n\tvirtual void reset(bool reset_by_camera_movement) override;\n\n\tvirtual bool is_render_pass_used() const override;\n\n\t/**\n\t * Returns the VRAM used by ReGIR in MB\n\t */\n\tfloat get_VRAM_usage() const;\n\n\t/**\n\t * Returns the total number of cells currently used by the hash grid\n\t */\n\tunsigned int get_number_of_cells_alive(bool primary_hit) const;\n\tunsigned int get_total_number_of_cells_alive(bool primary_hit) const;\n\n\tGPURenderer* get_renderer();\n\n\tvoid update_all_cell_alive_count(HIPRTRenderData& render_data);\n\tfloat get_alive_cells_ratio(bool primary_hit) const;\n\t\nprivate:\n\tunsigned int m_number_of_cells_alive_primary_hits = 0;\n\tunsigned int m_number_of_cells_alive_secondary_hits = 0;\n\n\tXorshift32Generator m_local_rng = Xorshift32Generator(42);\n\tOrochiBuffer<unsigned int> m_grid_cells_alive_count_staging_host_pinned_buffer;\n\n\tReGIRHashGridStorage 
m_hash_grid_storage;\n\t// The grid that async compute last stored into\n\t/*ReGIRHashGridSoADevice m_last_async_compute_store_buffers_first_hits;\n\tReGIRHashGridSoADevice m_last_async_compute_store_buffers_secondary_hits;*/\n\t// Stores the pointers to the buffers that the last spatial reuse output into\n\tReGIRHashGridSoADevice m_last_spatial_reuse_output_buffer_primary_hits;\n\tReGIRHashGridSoADevice m_last_spatial_reuse_output_buffer_secondary_hits;\n\n\toroStream_t m_pre_integration_async_stream = nullptr;\n\toroStream_t m_grid_fill_async_stream_primary_hits = nullptr;\n\toroStream_t m_grid_fill_async_stream_secondary_hits = nullptr;\n\toroEvent_t m_oro_event = nullptr;\n\toroEvent_t m_event_pre_integration_duration_start = nullptr;\n\toroEvent_t m_event_pre_integration_duration_stop = nullptr;\n\t// Just a flag to make sure that the pre integration pass indeed ran otherwise,\n\t// if it didn't run, we cannot compute the GPU events elapsed times\n\tbool m_pre_integration_executed = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRDIRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/ReSTIRDIRenderPass.h\"\n#include \"Renderer/RenderPasses/ReSTIRRenderPassCommon.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID = \"ReSTIR DI Initial candidates\";\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID = \"ReSTIR DI Temporal reuse\";\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID = \"ReSTIR DI Spatial reuse\";\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID = \"ReSTIR DI Spatiotemporal reuse\";\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID = \"ReSTIR DI Lights presampling\";\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID = \"ReSTIR DI Directional reuse compute\";\n\nconst std::string ReSTIRDIRenderPass::RESTIR_DI_RENDER_PASS_NAME = \"ReSTIR DI Render Pass\";\n\nconst std::unordered_map<std::string, std::string> ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES =\n{\n\t{ RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID, \"ReSTIR_DI_InitialCandidates\" },\n\t{ RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID, \"ReSTIR_DI_TemporalReuse\" },\n\t{ RESTIR_DI_SPATIAL_REUSE_KERNEL_ID, \"ReSTIR_DI_SpatialReuse\" },\n\t{ RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID, \"ReSTIR_DI_SpatiotemporalReuse\" },\n\t{ RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID, \"ReSTIR_DI_LightsPresampling\" },\n\t{ RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID, ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_KERNEL_FUNCTION_NAME },\n};\n\nconst std::unordered_map<std::string, std::string> ReSTIRDIRenderPass::KERNEL_FILES =\n{\n\t{ RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DI/InitialCandidates.h\" },\n\t{ 
RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DI/TemporalReuse.h\" },\n\t{ RESTIR_DI_SPATIAL_REUSE_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DI/SpatialReuse.h\" },\n\t{ RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DI/FusedSpatiotemporalReuse.h\" },\n\t{ RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DI/LightsPresampling.h\" },\n\t{ RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID, ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_KERNEL_FILE },\n};\n\nReSTIRDIRenderPass::ReSTIRDIRenderPass(GPURenderer* renderer) : RenderPass(renderer, ReSTIRDIRenderPass::RESTIR_DI_RENDER_PASS_NAME)\n{\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_spatial_reuse_time_start));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_spatial_reuse_time_stop));\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_compiler_options = m_renderer->get_global_compiler_options();\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 
16);\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 16);\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, 
KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 24);\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->synchronize_options_with(global_compiler_options, 
GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 0);\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->set_kernel_file_path(ReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->set_kernel_function_name(ReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID));\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->synchronize_options_with(global_compiler_options);\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->get_kernel_options().set_macro_value(ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_IS_RESTIR_GI_COMPILE_OPTION_NAME, KERNEL_OPTION_FALSE);\n}\n\nvoid ReSTIRDIRenderPass::precompile_kernels(GPUKernelCompilerOptions partial_options, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n\tGPUKernelCompilerOptions options;\n\n\toptions = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->get_kernel_options().deep_copy();\n\tpartial_options.apply_onto(options);\n\tThreadManager::start_thread(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS, 
ThreadFunctions::precompile_kernel,\n\t\tReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID),\n\t\tReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID),\n\t\toptions, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\toptions = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->get_kernel_options().deep_copy();\n\tpartial_options.apply_onto(options);\n\tThreadManager::start_thread(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS, ThreadFunctions::precompile_kernel,\n\t\tReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID),\n\t\tReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID),\n\t\toptions, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\toptions = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->get_kernel_options().deep_copy();\n\tpartial_options.apply_onto(options);\n\tThreadManager::start_thread(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS, ThreadFunctions::precompile_kernel,\n\t\tReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID),\n\t\tReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID),\n\t\toptions, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\toptions = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().deep_copy();\n\tpartial_options.apply_onto(options);\n\tThreadManager::start_thread(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS, ThreadFunctions::precompile_kernel,\n\t\tReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID),\n\t\tReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID),\n\t\toptions, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\toptions = 
m_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().deep_copy();\n\tpartial_options.apply_onto(options);\n\tThreadManager::start_thread(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS, ThreadFunctions::precompile_kernel,\n\t\tReSTIRDIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID),\n\t\tReSTIRDIRenderPass::KERNEL_FILES.at(ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID),\n\t\toptions, hiprt_orochi_ctx, std::ref(func_name_sets));\n\n\tThreadManager::detach_threads(ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS);\n}\n\nbool ReSTIRDIRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tbool render_data_invalidated = false;\n\n\tint2 render_resolution = m_renderer->m_render_resolution;\n\n\tif (is_render_pass_used())\n\t{\n\t\t// ReSTIR DI enabled\n\t\tbool initial_candidates_reservoir_needs_resize = m_initial_candidates_reservoirs.size() == 0;\n\t\tbool spatial_output_1_needs_resize = m_spatial_output_reservoirs_1.size() == 0;\n\t\tbool spatial_output_2_needs_resize = m_spatial_output_reservoirs_2.size() == 0;\n\n\t\tif (initial_candidates_reservoir_needs_resize || spatial_output_1_needs_resize || spatial_output_2_needs_resize)\n\t\t\t// At least one buffer is going to be resized so buffers are invalidated\n\t\t\trender_data_invalidated = true;\n\n\t\tif (initial_candidates_reservoir_needs_resize)\n\t\t\tm_initial_candidates_reservoirs.resize(render_resolution.x * render_resolution.y);\n\n\t\tif (spatial_output_1_needs_resize)\n\t\t\tm_spatial_output_reservoirs_1.resize(render_resolution.x * render_resolution.y);\n\n\t\tif (spatial_output_2_needs_resize)\n\t\t\tm_spatial_output_reservoirs_2.resize(render_resolution.x * render_resolution.y);\n\n\n\n\t\t// Also allocating / deallocating the presampled lights buffer\n\t\tif 
(m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_TRUE)\n\t\t{\n\t\t\tReSTIRDISettings& restir_di_settings = m_renderer->get_render_settings().restir_di_settings;\n\t\t\tint presampled_light_count = restir_di_settings.light_presampling.number_of_subsets * restir_di_settings.light_presampling.subset_size;\n\t\t\tbool presampled_lights_needs_allocation = m_presampled_lights_buffer.size() != presampled_light_count;\n\n\t\t\tif (presampled_lights_needs_allocation)\n\t\t\t{\n\t\t\t\tm_presampled_lights_buffer.resize(presampled_light_count);\n\n\t\t\t\t// At least one buffer is going to be resized so buffers are invalidated\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (m_presampled_lights_buffer.size() > 0)\n\t\t\t\tm_presampled_lights_buffer.free();\n\t\t}\n\n\t\trender_data_invalidated |= ReSTIRRenderPassCommon::pre_render_update_directional_reuse_buffers<false>(render_data, m_renderer,\n\t\t\tm_per_pixel_spatial_reuse_radius,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\t}\n\telse\n\t{\n\t\t// ReSTIR DI disabled, we're going to free the buffers if that's not already done\n\t\tif (m_initial_candidates_reservoirs.size() > 0)\n\t\t{\n\t\t\tm_initial_candidates_reservoirs.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (m_spatial_output_reservoirs_1.size() > 0)\n\t\t{\n\t\t\tm_spatial_output_reservoirs_1.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (m_spatial_output_reservoirs_2.size() > 0)\n\t\t{\n\t\t\tm_spatial_output_reservoirs_2.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (m_presampled_lights_buffer.size() > 0)\n\t\t{\n\t\t\tm_presampled_lights_buffer.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\trender_data_invalidated 
|= ReSTIRRenderPassCommon::free_directional_reuse_buffers<false>(\n\t\t\tm_per_pixel_spatial_reuse_radius, \n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\t}\n\n\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.auto_reuse_radius)\n\t\t// A percentage of the maximum render resolution extent for automatic spatial reuse radius\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * ReSTIRRenderPassCommon::AUTO_SPATIAL_RADIUS_RESOLUTION_PERCENTAGE;\n\n\treturn render_data_invalidated;\n}\n\nvoid ReSTIRDIRenderPass::update_render_data()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\t// Setting the pointers for use in reset_render() in the camera rays kernel\n\tif (is_render_pass_used())\n\t{\n\t\tReSTIRRenderPassCommon::update_render_data_common_buffers<false>(render_data, \n\t\t\tm_per_pixel_spatial_reuse_radius,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u, \n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\n\t\t// If we just got ReSTIR enabled back, setting this one arbitrarily and resetting its content\n\t\tm_last_restir_output_reservoirs = m_spatial_output_reservoirs_1.get_device_pointer();\n\t\tm_spatial_output_reservoirs_1.upload_data(std::vector<ReSTIRDIReservoir>(m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y, ReSTIRDIReservoir()));\n\t}\n\telse\n\t{\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u = nullptr;\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull = 
nullptr;\n\t\trender_data.render_settings.restir_di_settings.common_spatial_pass.per_pixel_spatial_reuse_radius = nullptr;\n\t}\n}\n\nvoid ReSTIRDIRenderPass::resize(unsigned int new_width, unsigned int new_height)\n{\n\tif (!is_render_pass_used())\n\t\treturn;\n\n\tm_initial_candidates_reservoirs.resize(new_width * new_height);\n\tm_spatial_output_reservoirs_2.resize(new_width * new_height);\n\tm_spatial_output_reservoirs_1.resize(new_width * new_height);\n\n\tReSTIRRenderPassCommon::resize_directional_reuse_buffers<false>(m_renderer, new_width, new_height, \n\t\tm_per_pixel_spatial_reuse_radius, \n\t\tm_per_pixel_spatial_reuse_direction_mask_u, \n\t\tm_per_pixel_spatial_reuse_direction_mask_ull);\n}\n\nbool ReSTIRDIRenderPass::pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tif (!is_render_pass_used())\n\t\treturn false;\n\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tbool recompiled = false;\n\n\tbool need_spatiotemporal = m_renderer->get_render_settings().restir_di_settings.do_fused_spatiotemporal && !m_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_spatiotemporal;\n\tif (need_spatiotemporal)\n\t\t// Spatiotemporal is needed but hasn't been compiled yet\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\tbool need_temporal = m_renderer->get_render_settings().restir_di_settings.common_temporal_pass.do_temporal_reuse_pass && !m_renderer->get_render_settings().restir_di_settings.do_fused_spatiotemporal && !m_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_temporal;\n\tif (need_temporal)\n\t\t// Temporal needed\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->compile(hiprt_orochi_ctx, 
func_name_sets, use_cache, silent);\n\n\tbool need_spatial = m_renderer->get_render_settings().restir_di_settings.common_spatial_pass.do_spatial_reuse_pass && !m_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_spatial;\n\tif (need_spatial)\n\t\t// Spatial needed\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\tbool need_presampling = m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_TRUE && !m_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_presampling;\n\tif (need_presampling)\n\t\t// Light pre sampling needed\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\tbool need_directional_spatial_reuse = !m_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->has_been_compiled() && render_data.render_settings.restir_di_settings.common_spatial_pass.use_adaptive_directional_spatial_reuse;\n\trecompiled |= need_directional_spatial_reuse;\n\tif (need_directional_spatial_reuse)\n\t\t// Directional spatial reuse needed\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\treturn recompiled;\n}\n\nvoid ReSTIRDIRenderPass::reset(bool reset_by_camera_movement)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif(!is_render_pass_used())\n\t\treturn;\n\n\tif (render_data.render_settings.need_to_reset)// reset_by_camera_movement && render_data.render_settings.accumulate)\n\t{\n\t\tstd::vector<ReSTIRDIReservoir> empty_reservoirs(m_initial_candidates_reservoirs.size());\n\n\t\tif (m_initial_candidates_reservoirs.size() > 
0)\n\t\t\tm_initial_candidates_reservoirs.upload_data(empty_reservoirs);\n\n\t\tif (m_spatial_output_reservoirs_1.size() > 0)\n\t\t\tm_spatial_output_reservoirs_1.upload_data(empty_reservoirs);\n\n\t\tif (m_spatial_output_reservoirs_2.size() > 0)\n\t\t\tm_spatial_output_reservoirs_2.upload_data(empty_reservoirs);\n\t}\n\n\todd_frame = false;\n}\n\nbool ReSTIRDIRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn false;\n\n\tReSTIRDISettings& restir_di_settings = m_renderer->get_render_data().render_settings.restir_di_settings;\n\n\t// Resetting the flag here just to know that we need not to read the spatial reuse\n\t// pass oroEvents (if that flag isn't set to true before)\n\tm_spatial_reuse_events_recorded = false;\n\n\t// If ReSTIR DI is enabled\n\n\tif (m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_TRUE)\n\t\tlaunch_presampling_lights_pass(render_data);\n\n\tcompute_optimal_spatial_reuse_radii(render_data);\n\n\tlaunch_initial_candidates_pass(render_data);\n\n\tif (render_data.render_settings.restir_di_settings.do_fused_spatiotemporal)\n\t\t// Launching the fused spatiotemporal kernel\n\t\tlaunch_spatiotemporal_pass(render_data);\n\telse\n\t{\n\t\t// Launching the temporal and spatial passes separately\n\n\t\tif (restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\tlaunch_temporal_reuse_pass(render_data);\n\n\t\tif (restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\t\tlaunch_spatial_reuse_passes(render_data);\n\t}\n\n\tconfigure_output_buffer(render_data);\n\n\todd_frame = !odd_frame;\n\n\treturn true;\n}\n\nvoid ReSTIRDIRenderPass::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\t// If we had requested a temporal buffers clear, this has been done by this frame so we can\n\t// now reset the 
flag\n\tm_temporal_buffer_clear_requested = false;\n}\n\nvoid ReSTIRDIRenderPass::compute_optimal_spatial_reuse_radii(HIPRTRenderData& render_data)\n{\n\tbool accumulating = render_data.render_settings.accumulate;\n\tbool first_frame = render_data.render_settings.sample_number == 0;\n\tbool not_interacting = render_data.render_settings.wants_render_low_resolution == false;\n\tbool using_adaptive_directional_spatial_reuse = render_data.render_settings.restir_di_settings.common_spatial_pass.use_adaptive_directional_spatial_reuse;\n\n\tif (accumulating && first_frame && not_interacting && using_adaptive_directional_spatial_reuse)\n\t{\n\t\t// If we're not accumulating, we have no guarantee that the camera isn't moving and so\n\t\t// there isn't really an \"optimal\" reuse radius per pixel to find\n\t\t//\n\t\t// But if the camera isn't moving, then the neighborhood of a pixel is fixed and we can optimize\n\t\t// the best spatial reuse radius\n\t\t//\n\t\t// Also, we're only doing this as a \"prepass\" at sample 0: we only need this once for the whole rendering\n\n\t\tunsigned int* per_pixel_spatial_reuse_direction_mask_u = m_per_pixel_spatial_reuse_direction_mask_u.size() > 0 ? m_per_pixel_spatial_reuse_direction_mask_u.data() : nullptr;\n\t\tunsigned long long int* per_pixel_spatial_reuse_direction_mask_ull = m_per_pixel_spatial_reuse_direction_mask_ull.size() > 0 ? 
m_per_pixel_spatial_reuse_direction_mask_ull.data() : nullptr;\n\t\tunsigned char* per_pixel_spatial_reuse_radius = m_per_pixel_spatial_reuse_radius.data();\n\t\tvoid* launch_args[] = { &render_data, &per_pixel_spatial_reuse_direction_mask_u, &per_pixel_spatial_reuse_direction_mask_ull, &per_pixel_spatial_reuse_radius };\n\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\t}\n}\n\nLightPresamplingParameters ReSTIRDIRenderPass::configure_light_presampling_pass(HIPRTRenderData& render_data)\n{\n\tLightPresamplingParameters parameters;\n\t/**\n\t * Parameters specific to the kernel\n\t */\n\n\t // From all the lights of the scene, how many subsets to presample\n\tparameters.number_of_subsets = render_data.render_settings.restir_di_settings.light_presampling.number_of_subsets;\n\t// How many lights to presample in each subset\n\tparameters.subset_size = render_data.render_settings.restir_di_settings.light_presampling.subset_size;\n\t// Buffer that holds the presampled lights\n\tparameters.out_light_samples = m_presampled_lights_buffer.get_device_pointer();\n\n\t// For each presampled light, the probability that this is going to be an envmap sample\n\tparameters.envmap_sampling_probability = render_data.render_settings.restir_di_settings.initial_candidates.envmap_candidate_probability;\n\n\treturn parameters;\n}\n\nvoid ReSTIRDIRenderPass::launch_presampling_lights_pass(HIPRTRenderData& render_data)\n{\n\tLightPresamplingParameters launch_parameters = configure_light_presampling_pass(render_data);\n\n\tvoid* launch_args[] = { &launch_parameters, &render_data };\n\tint thread_count = render_data.render_settings.restir_di_settings.light_presampling.number_of_subsets * 
render_data.render_settings.restir_di_settings.light_presampling.subset_size;\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->launch_asynchronous(32, 1, thread_count, 1, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRDIRenderPass::configure_initial_pass(HIPRTRenderData& render_data)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tif (m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_TRUE)\n\t\trender_data.render_settings.restir_di_settings.light_presampling.light_samples = m_presampled_lights_buffer.get_device_pointer();\n\telse\n\t\trender_data.render_settings.restir_di_settings.light_presampling.light_samples = nullptr;\n\trender_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs = m_initial_candidates_reservoirs.get_device_pointer();\n}\n\nvoid ReSTIRDIRenderPass::launch_initial_candidates_pass(HIPRTRenderData& render_data)\n{\n\tconfigure_initial_pass(render_data);\n\n\tvoid* launch_args[] = { &render_data };\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRDIRenderPass::configure_temporal_pass(HIPRTRenderData& render_data)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_di_settings.common_temporal_pass.permutation_sampling_random_bits = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_di_settings.common_temporal_pass.temporal_buffer_clear_requested = m_temporal_buffer_clear_requested;\n\n\t// The input of the temporal pass is the output of last frame's\n\t// ReSTIR (and also the initial candidates but this is implicit\n\t// and hardcoded in the 
shader)\n\trender_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs = m_last_restir_output_reservoirs;\n\n\tReSTIRDIReservoir* temporal_output_reservoirs;\n\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\t// If we're going to do spatial reuse, reuse the initial\n\t\t// candidate reservoirs to store the output of the temporal pass.\n\t\t// The spatial reuse pass will read from that buffer.\n\t\t// \n\t\t// Reusing the initial candidates buffer (which is an input\n\t\t// to the temporal pass) as the output is legal and does not\n\t\t// cause a race condition because a given pixel only reads and\n\t\t// writes to its own pixel in the initial candidates buffer.\n\t\t// We're not risking another pixel reading in someone else's\n\t\t// pixel in the initial candidates buffer while we write into\n\t\t// it (that would be a race condition)\n\t\ttemporal_output_reservoirs = m_initial_candidates_reservoirs.get_device_pointer();\n\telse\n\t{\n\t\t// Else, no spatial reuse, the output of the temporal pass is going to be in its own buffer (because otherwise, \n\t\t// if we output in the initial candidates buffer, then it's going to be overridden by the initial candidates pass of the next frame).\n\t\t// Alternatively using m_spatial_output_reservoirs_1 and m_spatial_output_reservoirs_2 to avoid race conditions\n\t\tif (odd_frame)\n\t\t\ttemporal_output_reservoirs = m_spatial_output_reservoirs_1.get_device_pointer();\n\t\telse\n\t\t\ttemporal_output_reservoirs = m_spatial_output_reservoirs_2.get_device_pointer();\n\t}\n\n\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = temporal_output_reservoirs;\n}\n\nvoid ReSTIRDIRenderPass::launch_temporal_reuse_pass(HIPRTRenderData& render_data)\n{\n\tconfigure_temporal_pass(render_data);\n\n\tvoid* launch_args[] = { &render_data 
};\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRDIRenderPass::configure_temporal_pass_for_fused_spatiotemporal(HIPRTRenderData& render_data)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_di_settings.common_temporal_pass.permutation_sampling_random_bits = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_di_settings.common_temporal_pass.temporal_buffer_clear_requested = m_temporal_buffer_clear_requested;\n\n\t// The input of the temporal pass is the output of last frame's\n\t// ReSTIR (and also the initial candidates but this is implicit\n\t// and hardcoded in the shader)\n\trender_data.render_settings.restir_di_settings.temporal_pass.input_reservoirs = m_last_restir_output_reservoirs;\n\n\t// Not needed. 
In the fused spatiotemporal pass, everything is output by the spatial pass\n\trender_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs = nullptr;\n}\n\nvoid ReSTIRDIRenderPass::configure_spatial_pass(HIPRTRenderData& render_data, int spatial_pass_index)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_di_settings.common_spatial_pass.spatial_pass_index = spatial_pass_index;\n\n\tReSTIRDIReservoir* spatial_pass_input_reservoirs = nullptr;\n\tReSTIRDIReservoir* spatial_pass_output_reservoirs = nullptr;\n\n\tif (spatial_pass_index == 0)\n\t{\n\t\tif (render_data.render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\t// For the first spatial reuse pass, we hardcode reading from the output of the temporal pass and storing into 'm_spatial_output_reservoirs_1'\n\t\t\tspatial_pass_input_reservoirs = render_data.render_settings.restir_di_settings.temporal_pass.output_reservoirs;\n\t\telse\n\t\t\t// If there is no temporal reuse pass, using the initial candidates as the input to the spatial reuse pass\n\t\t\tspatial_pass_input_reservoirs = render_data.render_settings.restir_di_settings.initial_candidates.output_reservoirs;\n\n\t\tspatial_pass_output_reservoirs = m_spatial_output_reservoirs_1.get_device_pointer();\n\t}\n\telse\n\t{\n\t\t// And then, starting at the second spatial reuse pass, we read from the output of the previous spatial pass and store\n\t\t// in either m_spatial_output_reservoirs_1 or m_spatial_output_reservoirs_2, depending on which one isn't the input (we don't\n\t\t// want to store in the same buffers that is used for output because that's a race condition so\n\t\t// we're ping-ponging between the two outputs of the spatial reuse pass)\n\n\t\tif ((spatial_pass_index & 1) == 0)\n\t\t{\n\t\t\tspatial_pass_input_reservoirs = m_spatial_output_reservoirs_2.get_device_pointer();\n\t\t\tspatial_pass_output_reservoirs = 
m_spatial_output_reservoirs_1.get_device_pointer();\n\t\t}\n\t\telse\n\t\t{\n\t\t\tspatial_pass_input_reservoirs = m_spatial_output_reservoirs_1.get_device_pointer();\n\t\t\tspatial_pass_output_reservoirs = m_spatial_output_reservoirs_2.get_device_pointer();\n\n\t\t}\n\t}\n\n\trender_data.render_settings.restir_di_settings.spatial_pass.input_reservoirs = spatial_pass_input_reservoirs;\n\trender_data.render_settings.restir_di_settings.spatial_pass.output_reservoirs = spatial_pass_output_reservoirs;\n}\n\nvoid ReSTIRDIRenderPass::configure_spatial_pass_for_fused_spatiotemporal(HIPRTRenderData& render_data, int spatial_pass_index)\n{\n\tReSTIRDISettings& restir_settings = render_data.render_settings.restir_di_settings;\n\trestir_settings.common_spatial_pass.spatial_pass_index = spatial_pass_index;\n\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\n\tReSTIRDIReservoir* spatial_pass_input_reservoirs = nullptr;\n\tReSTIRDIReservoir* spatial_pass_output_reservoirs = nullptr;\n\n\tif (spatial_pass_index == 0)\n\t\t// The input of the spatial resampling in the fused spatiotemporal pass is the\n\t\t// temporal buffer of the last frame i.e. 
the input to the temporal pass\n\t\t//\n\t\t// Note, this line of code below assumes that the temporal pass was configured\n\t\t// prior to calling this function such that\n\t\t// 'restir_settings.temporal_pass.input_reservoirs'\n\t\t// is the proper pointer\n\t\tspatial_pass_input_reservoirs = restir_settings.temporal_pass.input_reservoirs;\n\telse\n\t\t// If this is not the first spatial reuse pass, the input is the output of the previous pass\n\t\tspatial_pass_input_reservoirs = restir_settings.spatial_pass.output_reservoirs;\n\n\t// Outputting in whichever isn't the input\n\tif (spatial_pass_input_reservoirs == m_spatial_output_reservoirs_1.get_device_pointer())\n\t\tspatial_pass_output_reservoirs = m_spatial_output_reservoirs_2.get_device_pointer();\n\telse\n\t\tspatial_pass_output_reservoirs = m_spatial_output_reservoirs_1.get_device_pointer();\n\n\trestir_settings.spatial_pass.input_reservoirs = spatial_pass_input_reservoirs;\n\trestir_settings.spatial_pass.output_reservoirs = spatial_pass_output_reservoirs;\n}\n\nvoid ReSTIRDIRenderPass::launch_spatial_reuse_passes(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data };\n\n\t// Emitting an event for timing all the spatial reuse passes combined\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_spatial_reuse_time_start, m_renderer->get_main_stream()));\n\n\tfor (int spatial_reuse_pass = 0; spatial_reuse_pass < render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes; spatial_reuse_pass++)\n\t{\n\t\tconfigure_spatial_pass(render_data, spatial_reuse_pass);\n\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\t}\n\n\t// Emitting the stop event\n\tOROCHI_CHECK_ERROR(oroEventRecord(m_spatial_reuse_time_stop, m_renderer->get_main_stream()));\n\tm_spatial_reuse_events_recorded = 
true;\n}\n\nvoid ReSTIRDIRenderPass::configure_spatiotemporal_pass(HIPRTRenderData& render_data)\n{\n\t// The buffers of the temporal pass are going to be configured in the same way\n\tconfigure_temporal_pass_for_fused_spatiotemporal(render_data);\n\n\t// But the spatial pass is going to read from the input of the temporal pass i.e. the temporal buffer of the last frame, it's not going to read from the output of the temporal pass\n\tconfigure_spatial_pass_for_fused_spatiotemporal(render_data, 0);\n}\n\nvoid ReSTIRDIRenderPass::launch_spatiotemporal_pass(HIPRTRenderData& render_data)\n{\n\tconfigure_spatiotemporal_pass(render_data);\n\n\tvoid* launch_args[] = { &render_data };\n\n\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\n\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes > 1)\n\t{\n\t\t// We have some more spatial reuse passes to do\n\n\t\tOROCHI_CHECK_ERROR(oroEventRecord(m_spatial_reuse_time_start, m_renderer->get_main_stream()));\n\n\t\tfor (int spatial_pass_index = 1; spatial_pass_index < render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes; spatial_pass_index++)\n\t\t{\n\t\t\tconfigure_spatial_pass_for_fused_spatiotemporal(render_data, spatial_pass_index);\n\t\t\tm_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\t\t}\n\n\t\t// Emitting the stop event\n\t\tOROCHI_CHECK_ERROR(oroEventRecord(m_spatial_reuse_time_stop, m_renderer->get_main_stream()));\n\t\tm_spatial_reuse_events_recorded = true;\n\t}\n}\n\nvoid ReSTIRDIRenderPass::configure_output_buffer(HIPRTRenderData& 
render_data)\n{\n\tReSTIRDISettings& restir_di_settings = render_data.render_settings.restir_di_settings;\n\n\t// Keeping in mind which was the buffer used last for the output of the spatial reuse pass as this is the buffer that\n\t// we're going to use as the input to the temporal reuse pass of the next frame\n\tif (restir_di_settings.common_spatial_pass.do_spatial_reuse_pass || restir_di_settings.do_fused_spatiotemporal)\n\t\t// If there was spatial reuse, using the output of the spatial reuse pass as the input of the temporal\n\t\t// pass of next frame\n\t\trestir_di_settings.restir_output_reservoirs = restir_di_settings.spatial_pass.output_reservoirs;\n\telse if (restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t// If there was a temporal reuse pass, using that output as the input of the next temporal reuse pass\n\t\trestir_di_settings.restir_output_reservoirs = restir_di_settings.temporal_pass.output_reservoirs;\n\telse\n\t\t// No spatial or temporal, the output of ReSTIR is just the output of the initial candidates pass\n\t\trestir_di_settings.restir_output_reservoirs = restir_di_settings.initial_candidates.output_reservoirs;\n\n\tm_last_restir_output_reservoirs = restir_di_settings.restir_output_reservoirs;\n}\n\nvoid ReSTIRDIRenderPass::compute_render_times()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) != LSS_RESTIR_DI)\n\t\treturn;\n\n\tstd::unordered_map<std::string, float>& ms_time_per_pass = m_renderer->get_render_pass_times();\n\tReSTIRDISettings& restir_di_settings = render_data.render_settings.restir_di_settings;\n\n\tif (m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_TRUE)\n\t\tms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID] = 
m_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]->compute_execution_time();\n\n\tms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID] = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID]->compute_execution_time();\n\tif (restir_di_settings.do_fused_spatiotemporal)\n\t{\n\t\tms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID] = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID]->compute_execution_time();\n\n\t\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes > 1 && m_spatial_reuse_events_recorded)\n\t\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&ms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID], m_spatial_reuse_time_start, m_spatial_reuse_time_stop));\n\t}\n\telse\n\t{\n\t\tif (restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\tms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID] = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID]->compute_execution_time();\n\n\t\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes > 1 && m_spatial_reuse_events_recorded)\n\t\t\tOROCHI_CHECK_ERROR(oroEventElapsedTime(&ms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID], m_spatial_reuse_time_start, m_spatial_reuse_time_stop));\n\t}\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> ReSTIRDIRenderPass::get_all_kernels()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (!is_render_pass_used())\n\t\treturn {};\n\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> active_kernels = m_kernels;\n\n\tReSTIRDISettings& restir_di_settings = m_renderer->get_render_settings().restir_di_settings;\n\tif (restir_di_settings.do_fused_spatiotemporal)\n\t{\n\t\t// If using spatiotemporal, these two kernels aren't active so we're not returning 
them\n\t\tactive_kernels.erase(ReSTIRDIRenderPass::RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID);\n\n\t\tif (render_data.render_settings.restir_di_settings.common_spatial_pass.number_of_passes == 1)\n\t\t\t// If we only have one spatial reuse pass, it's already handled by the fused spatiotemporal\n\t\t\t// pass so we need the spatial kernels\n\t\t\tactive_kernels.erase(ReSTIRDIRenderPass::RESTIR_DI_SPATIAL_REUSE_KERNEL_ID);\n\t}\n\telse\n\t\t// Not using fused spatiotemporal\n\t\tactive_kernels.erase(ReSTIRDIRenderPass::RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID);\n\n\tif (m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING) == KERNEL_OPTION_FALSE)\n\t\t// Not using light presampling\n\t\tactive_kernels.erase(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID);\n\n\treturn active_kernels;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> ReSTIRDIRenderPass::get_tracing_kernels()\n{\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> out = m_kernels;\n\n\t// The presampling light kernel isn't a trace kernel\n\tout.erase(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID);\n\n\treturn out;\n}\n\nbool ReSTIRDIRenderPass::is_render_pass_used() const\n{\n\treturn m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_RESTIR_DI;\n}\n\nvoid ReSTIRDIRenderPass::request_temporal_bufffers_clear()\n{\n\tm_temporal_buffer_clear_requested = true;\n}\n\nfloat ReSTIRDIRenderPass::get_VRAM_usage() const\n{\n\treturn (m_initial_candidates_reservoirs.get_byte_size() + \n\t\tm_spatial_output_reservoirs_1.get_byte_size() +\n\t\tm_spatial_output_reservoirs_2.get_byte_size() +\n\t\tm_per_pixel_spatial_reuse_direction_mask_u.get_byte_size() +\n\t\tm_per_pixel_spatial_reuse_direction_mask_ull.get_byte_size() +\n\t\tm_per_pixel_spatial_reuse_radius.get_byte_size() +\n\t\tm_spatial_reuse_statistics_hit_hits.get_byte_size() 
+\n\t\tm_spatial_reuse_statistics_hit_total.get_byte_size() +\n\t\tm_presampled_lights_buffer.get_byte_size()) / 1000000.0f;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRDIRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RESTIR_DI_RENDER_PASS_H\n#define RESTIR_DI_RENDER_PASS_H\n\n#include \"Device/includes/ReSTIR/DI/Reservoir.h\"\n#include \"Device/includes/ReSTIR/DI/PresampledLight.h\"\n#include \"HIPRT-Orochi/OrochiBuffer.h\"\n#include \"HostDeviceCommon/RenderData.h\"\n#include \"Renderer/RenderPasses/RenderPass.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n\nclass GPURenderer;\n\nclass ReSTIRDIRenderPass : public RenderPass\n{\npublic:\n\t/**\n\t * These constants here are used to reference kernel objects in the 'm_kernels' map\n\t * or in the 'm_render_pass_times' map\n\t */\n\tstatic const std::string RESTIR_DI_INITIAL_CANDIDATES_KERNEL_ID;\n\tstatic const std::string RESTIR_DI_TEMPORAL_REUSE_KERNEL_ID;\n\tstatic const std::string RESTIR_DI_SPATIAL_REUSE_KERNEL_ID;\n\tstatic const std::string RESTIR_DI_SPATIOTEMPORAL_REUSE_KERNEL_ID;\n\tstatic const std::string RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID;\n\tstatic const std::string RESTIR_DI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID;\n\n\tstatic const std::string RESTIR_DI_RENDER_PASS_NAME;\n\n\t/**\n\t * This map contains constants that are the name of the main function of the kernels, their entry points.\n\t * They are used when compiling the kernels.\n\t *\n\t * This means that if you define your camera ray kernel main function as:\n\t *\n\t * GLOBAL_KERNEL_SIGNATURE(void) CameraRays(HIPRTRenderData render_data, int2 res)\n\t *\n\t * Then KERNEL_FUNCTION_NAMES[CAMERA_RAYS_KERNEL_ID] = \"CameraRays\"\n\t */\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FUNCTION_NAMES;\n\n\t/**\n\t * Same as 'KERNEL_FUNCTION_NAMES' but for kernel files\n\t */\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FILES;\n\n\tReSTIRDIRenderPass() {}\n\tReSTIRDIRenderPass(GPURenderer* renderer);\n\n\t/**\n\t * Precompiles all kernels of this render pass to fill the 
shader cache in advance.\n\t * \n\t * Kernels will be compiled with their *current* options but with the options contained\n\t * in 'partial_options' overriding the corresponding options of the kernels\n\t */\n\tvoid precompile_kernels(GPUKernelCompilerOptions partial_options, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets);\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override;\n\n\tvirtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) override;\t\n\t\n\t/**\n\t * Allocates/frees the ReSTIR DI buffers depending on whether or not the renderer\n\t * needs them (whether or not ReSTIR DI is being used basically) respectively.\n\t */\n\tvirtual bool pre_render_update(float delta_time) override;\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void update_render_data() override;\n\n\tvirtual void reset(bool reset_by_camera_movement) override;\n\n\tvirtual void compute_render_times() override;\n\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_all_kernels() override;\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_tracing_kernels() override;\n\t\n\tvirtual bool is_render_pass_used() const override;\n\tvoid request_temporal_bufffers_clear();\n\n\t/**\n\t * Returns the VRAM used by ReSTIR DI in MB\n\t */\n\tfloat get_VRAM_usage() const;\n\nprivate:\n\tLightPresamplingParameters configure_light_presampling_pass(HIPRTRenderData& render_data);\n\tvoid configure_initial_pass(HIPRTRenderData& render_data);\n\tvoid configure_temporal_pass(HIPRTRenderData& render_data);\n\tvoid 
configure_temporal_pass_for_fused_spatiotemporal(HIPRTRenderData& render_data);\n\tvoid configure_spatial_pass(HIPRTRenderData& render_data, int spatial_pass_index);\n\tvoid configure_spatial_pass_for_fused_spatiotemporal(HIPRTRenderData& render_data, int spatial_pass_index);\n\tvoid configure_spatiotemporal_pass(HIPRTRenderData& render_data);\n\tvoid configure_output_buffer(HIPRTRenderData& render_data);\n\n\tvoid compute_optimal_spatial_reuse_radii(HIPRTRenderData& render_data);\n\tvoid launch_presampling_lights_pass(HIPRTRenderData& render_data);\n\tvoid launch_initial_candidates_pass(HIPRTRenderData& render_data);\n\tvoid launch_temporal_reuse_pass(HIPRTRenderData& render_data);\n\tvoid launch_spatial_reuse_passes(HIPRTRenderData& render_data);\n\tvoid launch_spatiotemporal_pass(HIPRTRenderData& render_data);\n\n\t// ReSTIR reservoirs for the initial candidates\n\tOrochiBuffer<ReSTIRDIReservoir> m_initial_candidates_reservoirs;\n\t// ReSTIR reservoirs for the output of the spatial reuse pass\n\tOrochiBuffer<ReSTIRDIReservoir> m_spatial_output_reservoirs_1;\n\t// ReSTIR DI final reservoirs of the frame.\n\t// This the output of the spatial reuse passes.\n\t// Those are the reservoirs that are carried over between frames for\n\t// the temporal reuse pass to feed upon\n\tOrochiBuffer<ReSTIRDIReservoir> m_spatial_output_reservoirs_2;\n\n\t// Buffer that holds the presampled lights if light presampling is enabled \n\t// (GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING)\n\t//\n\t// Implementation from the paper\n\t// [Rearchitecting Spatiotemporal Resampling for Production] https://research.nvidia.com/publication/2021-07_rearchitecting-spatiotemporal-resampling-production\n\tOrochiBuffer<ReSTIRDIPresampledLight> m_presampled_lights_buffer;\n\n\tReSTIRDIReservoir* m_last_restir_output_reservoirs = nullptr;\n\n\tOrochiBuffer<unsigned char> m_per_pixel_spatial_reuse_radius;\n\tOrochiBuffer<unsigned int> 
m_per_pixel_spatial_reuse_direction_mask_u;\n\tOrochiBuffer<unsigned long long int> m_per_pixel_spatial_reuse_direction_mask_ull;\n\n\tOrochiBuffer<unsigned long long int> m_spatial_reuse_statistics_hit_total;\n\tOrochiBuffer<unsigned long long int> m_spatial_reuse_statistics_hit_hits;\n\n\t// If true, the temporal buffers are going to be reset by the temporal pass\n\tbool m_temporal_buffer_clear_requested = false;\n\tbool odd_frame = false;\n\n\t// Events for timing the cumulated render time of all the spatial reuses passes\n\tbool m_spatial_reuse_events_recorded = false;\n\toroEvent_t m_spatial_reuse_time_start = nullptr;\n\toroEvent_t m_spatial_reuse_time_stop = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRGIRenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/ReSTIRGIRenderPass.h\"\n#include \"Renderer/RenderPasses/ReSTIRRenderPassCommon.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_RENDER_PASS_NAME = \"ReSTIR GI Render pass\";\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID = \"ReSTIR GI Initial candidates\";\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID = \"ReSTIR GI Temporal reuse\";\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID = \"ReSTIR GI Spatial reuse\";\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID = \"ReSTIR GI Shading\";\nconst std::string ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID = \"ReSTIR GI Directional reuse compute\";\n\nconst std::unordered_map<std::string, std::string> ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES =\n{\n\t{ RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID, \"ReSTIR_GI_InitialCandidates\" },\n\t{ RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID, \"ReSTIR_GI_TemporalReuse\" },\n\t{ RESTIR_GI_SPATIAL_REUSE_KERNEL_ID, \"ReSTIR_GI_SpatialReuse\" },\n\t{ RESTIR_GI_SHADING_KERNEL_ID, \"ReSTIR_GI_Shading\" },\n\t{ RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID, ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_KERNEL_FUNCTION_NAME },\n};\n\nconst std::unordered_map<std::string, std::string> ReSTIRGIRenderPass::KERNEL_FILES =\n{\n\t{ RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/GI/InitialCandidates.h\"},\n\t{ RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/GI/TemporalReuse.h\" },\n\t{ RESTIR_GI_SPATIAL_REUSE_KERNEL_ID, DEVICE_KERNELS_DIRECTORY \"/ReSTIR/GI/SpatialReuse.h\" },\n\t{ RESTIR_GI_SHADING_KERNEL_ID, DEVICE_KERNELS_DIRECTORY 
\"/ReSTIR/GI/Shading.h\" },\n\t{ RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID, ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_KERNEL_FILE },\n};\n\nReSTIRGIRenderPass::ReSTIRGIRenderPass() : ReSTIRGIRenderPass(nullptr) {}\nReSTIRGIRenderPass::ReSTIRGIRenderPass(GPURenderer* renderer) : MegaKernelRenderPass(renderer, ReSTIRGIRenderPass::RESTIR_GI_RENDER_PASS_NAME) \n{\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_spatial_reuse_time_start));\n\tOROCHI_CHECK_ERROR(oroEventCreate(&m_spatial_reuse_time_stop));\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_compiler_options = m_renderer->get_global_compiler_options();\n\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->set_kernel_file_path(ReSTIRGIRenderPass::KERNEL_FILES.at(ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->set_kernel_function_name(ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID] = 
std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->set_kernel_file_path(ReSTIRGIRenderPass::KERNEL_FILES.at(ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->set_kernel_function_name(ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->set_kernel_file_path(ReSTIRGIRenderPass::KERNEL_FILES.at(ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->set_kernel_function_name(ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 
8);\n\t\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->set_kernel_file_path(ReSTIRGIRenderPass::KERNEL_FILES.at(ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->set_kernel_function_name(ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->synchronize_options_with(global_compiler_options, GPURenderer::KERNEL_OPTIONS_NOT_SYNCHRONIZED);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, KERNEL_OPTION_TRUE);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->get_kernel_options().set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, 8);\n\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID] = std::make_shared<GPUKernel>();\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->set_kernel_file_path(ReSTIRGIRenderPass::KERNEL_FILES.at(ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->set_kernel_function_name(ReSTIRGIRenderPass::KERNEL_FUNCTION_NAMES.at(ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID));\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->synchronize_options_with(global_compiler_options);\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->get_kernel_options().set_macro_value(ReSTIRRenderPassCommon::DIRECTIONAL_REUSE_IS_RESTIR_GI_COMPILE_OPTION_NAME, KERNEL_OPTION_TRUE);\n}\n\nvoid ReSTIRGIRenderPass::resize(unsigned int new_width, unsigned int new_height)\n{\n\tif 
(!is_render_pass_used())\n\t\treturn;\n\n\tm_initial_candidates_buffer.resize(new_width * new_height);\n\tm_temporal_buffer.resize(new_width * new_height);\n\tm_spatial_buffer.resize(new_width * new_height);\n\n\tReSTIRRenderPassCommon::resize_directional_reuse_buffers<true>(m_renderer, new_width, new_height, \n\t\tm_per_pixel_spatial_reuse_radius, \n\t\tm_per_pixel_spatial_reuse_direction_mask_u, \n\t\tm_per_pixel_spatial_reuse_direction_mask_ull);\n}\n\nbool ReSTIRGIRenderPass::pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (!is_render_pass_used())\n\t\treturn false;\n\n\tbool recompiled = false;\n\n\tbool need_temporal = render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass && !m_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_temporal;\n\tif (need_temporal)\n\t\t// Temporal needed\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\tbool need_spatial = render_data.render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass && !m_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->has_been_compiled();\n\trecompiled |= need_spatial;\n\tif (need_spatial)\n\t\t// Spatial needed\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\tbool need_directional_spatial_reuse = !m_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->has_been_compiled() && render_data.render_settings.restir_gi_settings.common_spatial_pass.use_adaptive_directional_spatial_reuse;\n\trecompiled |= need_directional_spatial_reuse;\n\tif (need_directional_spatial_reuse)\n\t\t// Spatial 
needed\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n\n\treturn recompiled;\n}\n\nbool ReSTIRGIRenderPass::pre_render_update(float delta_time)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tMegaKernelRenderPass::pre_render_update(delta_time);\n\n\tbool render_data_invalidated = false;\n\n\tint2 render_resolution = m_renderer->m_render_resolution;\n\n\tif (is_render_pass_used())\n\t{\n\t\t// ReSTIR GI enabled\n\t\tbool initial_candidates_reservoir_needs_resize = m_initial_candidates_buffer.size() == 0;\n\t\tbool temporal_candidates_reservoir_needs_resize = m_temporal_buffer.size() == 0;\n\t\tbool spatial_candidates_reservoir_needs_resize = m_spatial_buffer.size() == 0;\n\n\t\tif (initial_candidates_reservoir_needs_resize || temporal_candidates_reservoir_needs_resize || spatial_candidates_reservoir_needs_resize)\n\t\t\t// At least on buffer is going to be resized so buffers are invalidated\n\t\t\trender_data_invalidated = true;\n\n\t\tif (initial_candidates_reservoir_needs_resize)\n\t\t\tm_initial_candidates_buffer.resize(render_resolution.x * render_resolution.y);\n\n\t\tif (temporal_candidates_reservoir_needs_resize)\n\t\t\tm_temporal_buffer.resize(render_resolution.x * render_resolution.y);\n\n\t\tif (spatial_candidates_reservoir_needs_resize)\n\t\t\tm_spatial_buffer.resize(render_resolution.x * render_resolution.y);\n\n\t\trender_data_invalidated |= ReSTIRRenderPassCommon::pre_render_update_directional_reuse_buffers<true>(render_data, m_renderer,\n\t\t\tm_per_pixel_spatial_reuse_radius, \n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\n\t\t// Arbitrary setting this one so that we're sure it's pointing to a valid buffer when all the buffers are resized\n\t\tm_last_temporal_output_reservoirs = 
m_initial_candidates_buffer.get_device_pointer();\n\t}\n\telse\n\t{\n\t\t// ReSTIR GI disabled, we're going to free the buffers if that's not already done\n\t\tif (m_initial_candidates_buffer.size() > 0)\n\t\t{\n\t\t\tm_initial_candidates_buffer.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (m_temporal_buffer.size() > 0)\n\t\t{\n\t\t\tm_temporal_buffer.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (m_spatial_buffer.size() > 0)\n\t\t{\n\t\t\tm_spatial_buffer.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\trender_data_invalidated |= ReSTIRRenderPassCommon::free_directional_reuse_buffers<true>(\n\t\t\tm_per_pixel_spatial_reuse_radius,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\t}\n\n\tif (render_data.render_settings.restir_gi_settings.common_spatial_pass.auto_reuse_radius)\n\t\t// A percentage of the maximum render resolution extent for automatic spatial reuse radius\n\t\trender_data.render_settings.restir_gi_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * ReSTIRRenderPassCommon::AUTO_SPATIAL_RADIUS_RESOLUTION_PERCENTAGE;\n\n\treturn render_data_invalidated;\n}\n\nvoid ReSTIRGIRenderPass::compute_optimal_spatial_reuse_radii(HIPRTRenderData& render_data)\n{\n\tbool accumulating = render_data.render_settings.accumulate;\n\tbool first_frame = render_data.render_settings.sample_number == 0;\n\tbool not_interacting = render_data.render_settings.wants_render_low_resolution == false;\n\tbool using_adaptive_directional_spatial_reuse = render_data.render_settings.restir_gi_settings.common_spatial_pass.use_adaptive_directional_spatial_reuse;\n\n\tif (accumulating && first_frame && not_interacting && using_adaptive_directional_spatial_reuse)\n\t{\n\t\t// If we're not accumulating, we have no 
guarantee that the camera isn't moving and so\n\t\t// there isn't really an \"optimal\" reuse radius per pixel to find\n\t\t//\n\t\t// But if the camera isn't moving, then the neighborhood of a pixel is fixed and we can optimize\n\t\t// the best spatial reuse radius\n\t\t//\n\t\t// Also, we're only doing this as a \"prepass\" at sample 0: we only need this once for the whole rendering\n\n\t\tunsigned int* per_pixel_spatial_reuse_direction_mask_u = m_per_pixel_spatial_reuse_direction_mask_u.size() > 0 ? m_per_pixel_spatial_reuse_direction_mask_u.get_device_pointer() : nullptr;\n\t\tunsigned long long int* per_pixel_spatial_reuse_direction_mask_ull = m_per_pixel_spatial_reuse_direction_mask_ull.size() > 0 ? m_per_pixel_spatial_reuse_direction_mask_ull.get_device_pointer() : nullptr;\n\t\tunsigned char* per_pixel_spatial_reuse_radius = m_per_pixel_spatial_reuse_radius.get_device_pointer();\n\t\tvoid* launch_args[] = { &render_data, &per_pixel_spatial_reuse_direction_mask_u, &per_pixel_spatial_reuse_direction_mask_ull, &per_pixel_spatial_reuse_radius };\n\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n\t}\n}\n\nvoid ReSTIRGIRenderPass::configure_initial_candidates_pass(HIPRTRenderData& render_data)\n{\n\trender_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer = m_initial_candidates_buffer.get_device_pointer();\n\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n}\n\nvoid ReSTIRGIRenderPass::launch_initial_candidates_pass(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data };\n\n\tm_initial_candidates_generation_seed = render_data.random_number;\n\tif (render_data.render_settings.nb_bounces > 0)\n\t\t// We only need to trace paths for the initial candidates if we 
have\n\t\t// more than 1 bounce\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRGIRenderPass::configure_temporal_reuse_pass(HIPRTRenderData& render_data)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_gi_settings.common_temporal_pass.temporal_buffer_clear_requested = m_temporal_buffer_clear_requested;\n\t\n\tReSTIRGIReservoir* temporal_input_reservoirs;\n\tReSTIRGIReservoir* temporal_output_reservoirs;\n\n\tif ((render_data.render_settings.sample_number == 0 && render_data.render_settings.accumulate) || render_data.render_settings.need_to_reset)\n\t\t// First frame, using the initial candidates as the input\n\t\ttemporal_input_reservoirs = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer;\n\telse\n\t\t// Not the first frame, the input to the temporal pass is the output of the last frame ReSTIR\n\t\ttemporal_input_reservoirs = m_last_restir_output_reservoirs;\n\n\t// For the output, using whatever buffer isn't the one we're reading from (the input buffer)\n\tif (temporal_input_reservoirs == m_spatial_buffer.get_device_pointer())\n\t\ttemporal_output_reservoirs = m_temporal_buffer.get_device_pointer();\n\telse\n\t\ttemporal_output_reservoirs = m_spatial_buffer.get_device_pointer();\n\n\trender_data.render_settings.restir_gi_settings.temporal_pass.input_reservoirs = temporal_input_reservoirs;\n\trender_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs = temporal_output_reservoirs;\n\n\tm_last_temporal_output_reservoirs = temporal_output_reservoirs;\n}\n\nvoid ReSTIRGIRenderPass::launch_temporal_reuse_pass(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data };\n\n\tif 
(render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRGIRenderPass::configure_spatial_reuse_pass(HIPRTRenderData& render_data, int spatial_pass_index)\n{\n\trender_data.random_number = m_renderer->get_rng_generator().xorshift32();\n\trender_data.render_settings.restir_gi_settings.common_spatial_pass.spatial_pass_index = spatial_pass_index;\n\n\t// The spatial reuse pass spatially reuse on the output of the temporal pass in the 'temporal buffer' and\n\t// stores in the 'spatial buffer'\n\n\tReSTIRGIReservoir* input_reservoirs;\n\tReSTIRGIReservoir* output_reservoirs;\n\n\tif (spatial_pass_index > 0)\n\t\t// If this is the second spatial reuse pass or more, reading from the output of the previous pass\n\t\tinput_reservoirs = render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs;\n\telse\n\t{\n\t\t// This is the first spatial reuse pass, reading from the output of the temporal pass\n\t\t// or the initial candidates depending on whether or not we have a temporal reuse pass at all\n\n\t\tif (render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\t// and we have a temporal reuse pass so we're going to read from the temporal reservoirs\n\t\t\tinput_reservoirs = render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs;\n\t\telse\n\t\t\t// and we do not have a temporal reuse pass so we're just going to read from the initial candidates\n\t\t\tinput_reservoirs = m_initial_candidates_buffer.get_device_pointer();\n\t}\n\n\t// Outputting to whichever reservoir we're not reading from to avoid race conditions\n\tif (input_reservoirs == 
m_temporal_buffer.get_device_pointer())\n\t\toutput_reservoirs = m_spatial_buffer.get_device_pointer();\n\telse\n\t\toutput_reservoirs = m_temporal_buffer.get_device_pointer();\n\n\trender_data.render_settings.restir_gi_settings.spatial_pass.input_reservoirs = input_reservoirs;\n\trender_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs = output_reservoirs;\n}\n\nvoid ReSTIRGIRenderPass::launch_spatial_reuse_pass(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data };\n\n\tif (render_data.render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SPATIAL_REUSE_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nvoid ReSTIRGIRenderPass::configure_shading_pass(HIPRTRenderData& render_data)\n{\n\trender_data.random_number = m_initial_candidates_generation_seed;\n\n\tif (render_data.render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\trender_data.render_settings.restir_gi_settings.restir_output_reservoirs = render_data.render_settings.restir_gi_settings.spatial_pass.output_reservoirs;\n\telse if (render_data.render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\trender_data.render_settings.restir_gi_settings.restir_output_reservoirs = render_data.render_settings.restir_gi_settings.temporal_pass.output_reservoirs;\n\telse\n\t\trender_data.render_settings.restir_gi_settings.restir_output_reservoirs = render_data.render_settings.restir_gi_settings.initial_candidates.initial_candidates_buffer;\n\n\tm_last_restir_output_reservoirs = render_data.render_settings.restir_gi_settings.restir_output_reservoirs;\n}\n\nvoid ReSTIRGIRenderPass::launch_shading_pass(HIPRTRenderData& render_data)\n{\n\tvoid* launch_args[] = { &render_data 
};\n\n\tm_kernels[ReSTIRGIRenderPass::RESTIR_GI_SHADING_KERNEL_ID]->launch_asynchronous(KernelBlockWidthHeight, KernelBlockWidthHeight, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, launch_args, m_renderer->get_main_stream());\n}\n\nbool ReSTIRGIRenderPass::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (!m_render_pass_used_this_frame)\n\t\treturn false;\n\n\tcompute_optimal_spatial_reuse_radii(render_data);\n\n\tconfigure_initial_candidates_pass(render_data);\n\tlaunch_initial_candidates_pass(render_data);\n\n\tconfigure_temporal_reuse_pass(render_data);\n\tlaunch_temporal_reuse_pass(render_data);\n\n\tif (render_data.render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t{\n\t\tfor (int i = 0; i < render_data.render_settings.restir_gi_settings.common_spatial_pass.number_of_passes; i++)\n\t\t{\n\t\t\tconfigure_spatial_reuse_pass(render_data, i);\n\t\t\tlaunch_spatial_reuse_pass(render_data);\n\t\t}\n\t}\n\n\tconfigure_shading_pass(render_data);\n\tlaunch_shading_pass(render_data);\n\n\treturn true;\n}\n\nvoid ReSTIRGIRenderPass::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\t// If we had requested a temporal buffers clear, this has been done by this frame so we can\n\t// now reset the flag\n\tm_temporal_buffer_clear_requested = false;\n\n\tMegaKernelRenderPass::post_sample_update_async(render_data, compiler_options);\n}\n\nvoid ReSTIRGIRenderPass::update_render_data()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\t// Setting the pointers for use in reset_render() in the camera rays kernel\n\tif (is_render_pass_used())\n\t{\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_1 = m_initial_candidates_buffer.get_device_pointer();\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_2 = m_spatial_buffer.get_device_pointer();\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_3 = 
m_temporal_buffer.get_device_pointer();\n\n\t\tReSTIRRenderPassCommon::update_render_data_common_buffers<true>(render_data,\n\t\t\tm_per_pixel_spatial_reuse_radius,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_u,\n\t\t\tm_per_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tm_spatial_reuse_statistics_hit_hits,\n\t\t\tm_spatial_reuse_statistics_hit_total);\n\t}\n\telse\n\t{\n\t\t// If ReSTIR GI is disabled, setting the pointers to nullptr so that the camera rays kernel\n\t\t// for example can detect that the buffers are freed and doesn't try to reset them or do\n\t\t// anything with them (which would be invalid since we would be accessing nullptr buffers)\n\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_1 = nullptr;\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_2 = nullptr;\n\t\trender_data.aux_buffers.restir_gi_reservoir_buffer_3 = nullptr;\n\n\t\trender_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_u = nullptr;\n\t\trender_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_directions_mask_ull = nullptr;\n\t\trender_data.render_settings.restir_gi_settings.common_spatial_pass.per_pixel_spatial_reuse_radius = nullptr;\n\t}\n}\n\nvoid ReSTIRGIRenderPass::reset(bool reset_by_camera_movement)\n{\n\tif (m_spatial_reuse_statistics_hit_hits.size() > 0)\n\t{\n\t\tm_spatial_reuse_statistics_hit_hits.memset_whole_buffer(0);\n\t\tm_spatial_reuse_statistics_hit_total.memset_whole_buffer(0);\n\t}\n\n\tMegaKernelRenderPass::reset(reset_by_camera_movement);\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> ReSTIRGIRenderPass::get_tracing_kernels()\n{\n\treturn get_all_kernels();\n}\n\nbool ReSTIRGIRenderPass::is_render_pass_used() const\n{\n\treturn m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY) == PSS_RESTIR_GI;\n}\n\nvoid 
ReSTIRGIRenderPass::request_temporal_bufffers_clear()\n{\n\tm_temporal_buffer_clear_requested = true;\n}\n\nfloat ReSTIRGIRenderPass::get_VRAM_usage() const\n{\n\treturn (m_initial_candidates_buffer.get_byte_size() + \n\t\tm_temporal_buffer.get_byte_size() + \n\t\tm_spatial_buffer.get_byte_size() + \n\t\tm_per_pixel_spatial_reuse_direction_mask_u.get_byte_size() +\n\t\tm_per_pixel_spatial_reuse_direction_mask_ull.get_byte_size() +\n\t\tm_per_pixel_spatial_reuse_radius.get_byte_size()) / 1000000.0f;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRGIRenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RESTIR_GI_RENDER_PASS_H\n#define RESTIR_GI_RENDER_PASS_H\n\n#include \"Renderer/RenderPasses/RenderPass.h\"\n#include \"Renderer/RenderPasses/MegaKernelRenderPass.h\"\n#include \"Device/includes/ReSTIR/GI/Reservoir.h\"\n\nclass GPURenderer;\n\nclass ReSTIRGIRenderPass : public MegaKernelRenderPass\n{\npublic:\n\tstatic const std::string RESTIR_GI_RENDER_PASS_NAME;\n\tstatic const std::string RESTIR_GI_INITIAL_CANDIDATES_KERNEL_ID;\n\tstatic const std::string RESTIR_GI_TEMPORAL_REUSE_KERNEL_ID;\n\tstatic const std::string RESTIR_GI_SPATIAL_REUSE_KERNEL_ID;\n\tstatic const std::string RESTIR_GI_SHADING_KERNEL_ID;\n\tstatic const std::string RESTIR_GI_DIRECTIONAL_REUSE_COMPUTE_KERNEL_ID;\n\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FUNCTION_NAMES;\n\tstatic const std::unordered_map<std::string, std::string> KERNEL_FILES;\n\n\tReSTIRGIRenderPass();\n\tReSTIRGIRenderPass(GPURenderer* renderer);\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override;\n\n\tvirtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache) override;\n\tvirtual bool pre_render_update(float delta_time) override;\n\n\t/**\n\t * This pass computes the optimal reuse radius and reuse directions to use, per-pixel\n\t * during the spatial reuse passes\n\t *\n\t * This is a no-op if not accumulating i.e. 
this is only available for offline rendering\n\t */\n\tvoid compute_optimal_spatial_reuse_radii(HIPRTRenderData& render_data);\n\tvoid configure_initial_candidates_pass(HIPRTRenderData& render_data);\n\tvoid launch_initial_candidates_pass(HIPRTRenderData& render_data);\n\tvoid configure_temporal_reuse_pass(HIPRTRenderData& render_data);\n\tvoid launch_temporal_reuse_pass(HIPRTRenderData& render_data);\n\tvoid configure_spatial_reuse_pass(HIPRTRenderData& render_data, int spatial_pass_index);\n\tvoid launch_spatial_reuse_pass(HIPRTRenderData& render_data);\n\tvoid configure_shading_pass(HIPRTRenderData& render_data);\n\tvoid launch_shading_pass(HIPRTRenderData& render_data);\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tvirtual void update_render_data() override;\n\tvirtual void reset(bool reset_by_camera_movement) override;\n\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_tracing_kernels() override;\n\n\tvirtual bool is_render_pass_used() const override;\n\tvoid request_temporal_bufffers_clear();\n\n\t/**\n\t * Returns the VRAM used by ReSTIR GI in MB\n\t */\n\tfloat get_VRAM_usage() const;\n\nprivate:\n\t// Events for timing the time taken by spatial reuse\n\toroEvent_t m_spatial_reuse_time_start;\n\toroEvent_t m_spatial_reuse_time_stop;\n\n\tOrochiBuffer<ReSTIRGIReservoir> m_initial_candidates_buffer;\n\tOrochiBuffer<ReSTIRGIReservoir> m_temporal_buffer;\n\tOrochiBuffer<ReSTIRGIReservoir> m_spatial_buffer;\n\n\tOrochiBuffer<unsigned char> m_per_pixel_spatial_reuse_radius;\n\tOrochiBuffer<unsigned int> m_per_pixel_spatial_reuse_direction_mask_u;\n\tOrochiBuffer<unsigned long long int> m_per_pixel_spatial_reuse_direction_mask_ull;\n\n\tOrochiBuffer<unsigned long long int> m_spatial_reuse_statistics_hit_total;\n\tOrochiBuffer<unsigned long long int> 
m_spatial_reuse_statistics_hit_hits;\n\n\tReSTIRGIReservoir* m_last_temporal_output_reservoirs = nullptr;\n\tReSTIRGIReservoir* m_last_restir_output_reservoirs = nullptr;\n\n\tint m_initial_candidates_generation_seed;\n\tbool m_temporal_buffer_clear_requested;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRRenderPassCommon.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/RenderPasses/ReSTIRRenderPassCommon.h\"\n"
  },
  {
    "path": "src/Renderer/RenderPasses/ReSTIRRenderPassCommon.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RESTIR_RENDER_PASS_COMMON_H\n#define RESTIR_RENDER_PASS_COMMON_H\n\n#include \"HostDeviceCommon/ReSTIRSettingsHelper.h\"\n#include \"Renderer/GPURenderer.h\"\n\nclass ReSTIRRenderPassCommon\n{\npublic:\n\tstatic constexpr const char* const DIRECTIONAL_REUSE_KERNEL_FUNCTION_NAME = \"ReSTIR_Directional_Reuse_Compute\";\n\tstatic constexpr const char* const DIRECTIONAL_REUSE_KERNEL_FILE = DEVICE_KERNELS_DIRECTORY \"/ReSTIR/DirectionalReuseCompute.h\";\n\tstatic constexpr const char* const DIRECTIONAL_REUSE_IS_RESTIR_GI_COMPILE_OPTION_NAME = \"ComputingSpatialDirectionalReuseForReSTIRGI\";\n\n\tstatic constexpr float AUTO_SPATIAL_RADIUS_RESOLUTION_PERCENTAGE = 0.025f;\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic void resize_common_buffers(GPURenderer* renderer, int new_width, int new_height,\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius,\n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u,\n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<ColorRGB32F>& decoupled_shading_reuse_buffer)\n\t{\n\t\tresize_directional_reuse_buffers<IsReSTIRGI>(renderer, new_width, new_height,\n\t\t\tper_pixel_spatial_reuse_radius,\n\t\t\tper_pixel_spatial_reuse_direction_mask_u,\n\t\t\tper_pixel_spatial_reuse_direction_mask_ull);\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic void resize_directional_reuse_buffers(GPURenderer* renderer, int new_width, int new_height,\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius, \n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u, \n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull)\n\t{\n\t\tper_pixel_spatial_reuse_radius.resize(new_width * new_height);\n\n\t\tint bit_count = IsReSTIRGI \n\t\t\t? 
renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT) \n\t\t\t: renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT);\n\n\t\tif (bit_count <= 32)\n\t\t\tper_pixel_spatial_reuse_direction_mask_u.resize(new_width * new_height);\n\t\telse\n\t\t\tper_pixel_spatial_reuse_direction_mask_ull.resize(new_width * new_height);\n\t}\n\t\n\ttemplate <bool IsReSTIRGI>\n\tstatic bool pre_render_update_common_buffers(const HIPRTRenderData& render_data, GPURenderer* renderer,\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius,\n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u,\n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_hits,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_total,\n\t\tOrochiBuffer<ColorRGB32F>& decoupled_shading_buffer)\n\t{\n\t\tbool render_data_updated = false;\n\n\t\trender_data_updated |= pre_render_update_directional_reuse_buffers<IsReSTIRGI>(render_data, renderer,\n\t\t\tper_pixel_spatial_reuse_radius,\n\t\t\tper_pixel_spatial_reuse_direction_mask_u,\n\t\t\tper_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tspatial_reuse_statistics_hit_hits,\n\t\t\tspatial_reuse_statistics_hit_total);\n\n\t\treturn render_data_updated;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic bool pre_render_update_directional_reuse_buffers(const HIPRTRenderData& render_data, GPURenderer* renderer,\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius, \n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u, \n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_hits,\n\t\tOrochiBuffer<unsigned long long int>& 
spatial_reuse_statistics_hit_total)\n\t{\n\t\tReSTIRCommonSpatialPassSettings spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\t\tconst std::string& mask_bit_count_macro_name = IsReSTIRGI ? GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT : GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT;\n\t\t\t\n\t\t// Allocating / deallocating the adaptive directional spatial reuse buffers if the feature\n\t\t// isn't used\n\t\tbool render_data_invalidated = false;\n\t\tif (spatial_pass_settings.do_adaptive_directional_spatial_reuse(render_data.render_settings.accumulate))\n\t\t{\n\t\t\t// Allocating the proper buffer whether or not we're using less than 32 bits per mask or more \n\t\t\tif (renderer->get_global_compiler_options()->get_macro_value(mask_bit_count_macro_name) <= 32 && per_pixel_spatial_reuse_direction_mask_u.size() == 0)\n\t\t\t{\n\t\t\t\tper_pixel_spatial_reuse_direction_mask_u.resize(renderer->m_render_resolution.x * renderer->m_render_resolution.y);\n\t\t\t\tper_pixel_spatial_reuse_radius.resize(renderer->m_render_resolution.x * renderer->m_render_resolution.y);\n\t\t\t\tif (per_pixel_spatial_reuse_direction_mask_ull.size() > 0)\n\t\t\t\t\tper_pixel_spatial_reuse_direction_mask_ull.free();\n\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t\telse if (renderer->get_global_compiler_options()->get_macro_value(mask_bit_count_macro_name) > 32 && per_pixel_spatial_reuse_direction_mask_ull.size() == 0)\n\t\t\t{\n\t\t\t\tper_pixel_spatial_reuse_direction_mask_ull.resize(renderer->m_render_resolution.x * renderer->m_render_resolution.y);\n\t\t\t\tper_pixel_spatial_reuse_radius.resize(renderer->m_render_resolution.x * renderer->m_render_resolution.y);\n\t\t\t\tif (per_pixel_spatial_reuse_direction_mask_u.size() > 0)\n\t\t\t\t\tper_pixel_spatial_reuse_direction_mask_u.free();\n\n\t\t\t\trender_data_invalidated = 
true;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// We're not using the feature so we can free the buffers\n\n\t\t\t// Freeing the proper buffer depending on whether we use the 64 bits buffer or not\n\t\t\tif (per_pixel_spatial_reuse_direction_mask_u.size() > 0)\n\t\t\t{\n\t\t\t\tper_pixel_spatial_reuse_direction_mask_u.free();\n\t\t\t\tper_pixel_spatial_reuse_radius.free();\n\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t\telse if (per_pixel_spatial_reuse_direction_mask_ull.size() > 0)\n\t\t\t{\n\t\t\t\tper_pixel_spatial_reuse_direction_mask_ull.free();\n\t\t\t\tper_pixel_spatial_reuse_radius.free();\n\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t}\n\n\t\t// Also allocating / deallocating the buffers for the statistics\n\t\tif (spatial_pass_settings.compute_spatial_reuse_hit_rate)\n\t\t{\n\t\t\tif (spatial_reuse_statistics_hit_total.size() == 0)\n\t\t\t{\n\t\t\t\tspatial_reuse_statistics_hit_total.resize(1);\n\t\t\t\tspatial_reuse_statistics_hit_hits.resize(1);\n\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Freeing the buffers if the feature isn't used\n\t\t\tif (spatial_reuse_statistics_hit_total.size() > 0)\n\t\t\t{\n\t\t\t\tspatial_reuse_statistics_hit_total.free();\n\t\t\t\tspatial_reuse_statistics_hit_hits.free();\n\n\t\t\t\trender_data_invalidated = true;\n\t\t\t}\n\t\t}\n\n\t\treturn render_data_invalidated;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic bool free_common_buffers(\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius,\n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u,\n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_hits,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_total,\n\t\tOrochiBuffer<ColorRGB32F>& decoupled_shading_reuse_buffer)\n\t{\n\t\tbool render_data_invalidated = false;\n\n\t\trender_data_invalidated |= 
free_directional_reuse_buffers<IsReSTIRGI>(\n\t\t\tper_pixel_spatial_reuse_radius,\n\t\t\tper_pixel_spatial_reuse_direction_mask_u,\n\t\t\tper_pixel_spatial_reuse_direction_mask_ull,\n\t\t\tspatial_reuse_statistics_hit_hits,\n\t\t\tspatial_reuse_statistics_hit_total);\n\n\t\treturn render_data_invalidated;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic bool free_directional_reuse_buffers(\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius,\n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u,\n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_hits,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_total)\n\t{\n\t\tbool render_data_invalidated = false;\n\n\t\tif (per_pixel_spatial_reuse_direction_mask_u.size() > 0)\n\t\t{\n\t\t\tper_pixel_spatial_reuse_direction_mask_u.free();\n\t\t\tper_pixel_spatial_reuse_radius.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (per_pixel_spatial_reuse_direction_mask_ull.size() > 0)\n\t\t{\n\t\t\tper_pixel_spatial_reuse_direction_mask_ull.free();\n\t\t\tper_pixel_spatial_reuse_radius.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\tif (spatial_reuse_statistics_hit_total.size() > 0)\n\t\t{\n\t\t\tspatial_reuse_statistics_hit_total.free();\n\t\t\tspatial_reuse_statistics_hit_hits.free();\n\n\t\t\trender_data_invalidated = true;\n\t\t}\n\n\t\treturn render_data_invalidated;\n\t}\n\n\ttemplate <bool IsReSTIRGI>\n\tstatic void update_render_data_common_buffers(HIPRTRenderData& render_data,\n\t\tOrochiBuffer<unsigned char>& per_pixel_spatial_reuse_radius,\n\t\tOrochiBuffer<unsigned int>& per_pixel_spatial_reuse_direction_mask_u,\n\t\tOrochiBuffer<unsigned long long int>& per_pixel_spatial_reuse_direction_mask_ull,\n\t\tOrochiBuffer<unsigned long long int>& spatial_reuse_statistics_hit_hits,\n\t\tOrochiBuffer<unsigned long long int>& 
spatial_reuse_statistics_hit_total)\n\t{\n\t\tReSTIRCommonSpatialPassSettings& common_spatial_pass_settings = ReSTIRSettingsHelper::get_restir_spatial_pass_settings<IsReSTIRGI>(render_data);\n\n\t\tif (per_pixel_spatial_reuse_direction_mask_u.size() > 0)\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_directions_mask_u = per_pixel_spatial_reuse_direction_mask_u.get_device_pointer();\n\t\telse\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_directions_mask_u = nullptr;\n\t\t\t\n\t\tif (per_pixel_spatial_reuse_direction_mask_ull.size() > 0)\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_directions_mask_ull = per_pixel_spatial_reuse_direction_mask_ull.get_device_pointer();\n\t\telse\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_directions_mask_ull = nullptr;\n\n\t\tif (per_pixel_spatial_reuse_radius.size() > 0)\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_radius = per_pixel_spatial_reuse_radius.get_device_pointer();\n\t\telse\n\t\t\tcommon_spatial_pass_settings.per_pixel_spatial_reuse_radius = nullptr;\n\n\t\tif (common_spatial_pass_settings.compute_spatial_reuse_hit_rate)\n\t\t{\n\t\t\tcommon_spatial_pass_settings.spatial_reuse_hit_rate_total = spatial_reuse_statistics_hit_total.get_atomic_device_pointer();\n\t\t\tcommon_spatial_pass_settings.spatial_reuse_hit_rate_hits = spatial_reuse_statistics_hit_hits.get_atomic_device_pointer();\n\t\t}\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/RenderGraph.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/RenderGraph.h\"\n\nRenderGraph::RenderGraph() : RenderGraph(nullptr) {}\n\nRenderGraph::RenderGraph(GPURenderer* renderer) : RenderPass(renderer) {}\n\nvoid RenderGraph::set_render_window(RenderWindow* render_window)\n{\n\t// Setting the render window for all render passes\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->set_render_window(render_window);\n}\n\nvoid RenderGraph::compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->compile(hiprt_orochi_ctx, func_name_sets);\n}\n\nvoid RenderGraph::recompile(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->recompile(hiprt_orochi_ctx, func_name_sets, silent, use_cache);\n}\n\nvoid RenderGraph::resize(unsigned int new_width, unsigned int new_height)\n{\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->resize(new_width, new_height);\n}\n\nbool RenderGraph::pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tm_renderer->synchronize_all_kernels();\n\n\tbool recompiled = false;\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\trecompiled |= name_to_render_pass.second->pre_render_compilation_check(hiprt_orochi_ctx, func_name_sets, silent, use_cache);\n\n\treturn recompiled;\n}\n\nvoid RenderGraph::prepass()\n{\n\tfor (auto& name_to_render_pass : 
m_render_passes)\n\t\tname_to_render_pass.second->prepass();\n}\n\nvoid RenderGraph::update_is_render_pass_used()\n{\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->set_is_render_pass_used(name_to_render_pass.second->is_render_pass_used());\n}\n\nbool RenderGraph::pre_render_update(float delta_time)\n{\n\tbool render_data_invalidated = false;\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\trender_data_invalidated |= name_to_render_pass.second->pre_render_update(delta_time);\n\n\t// pre_render_update means that this is a new frame\n\tm_new_frame = true;\n\n\treturn render_data_invalidated;\n}\n\nbool RenderGraph::launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\t\t// Resetting the state of whether or not the render passes have been launched this frame or not\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t{\n\t\tm_render_pass_launched_this_frame_yet[name_to_render_pass.second.get()] = false;\n\n\t\tif (m_new_frame)\n\t\t\tm_render_pass_effectively_launched_this_frame[name_to_render_pass.second.get()] = false;\n\t}\n\n\t// Launching all the render passes\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tlaunch_render_pass_with_dependencies(name_to_render_pass.second, render_data, compiler_options);\n\n\t// This is not a fresh frame anymore\n\tm_new_frame = false;\n\n\treturn true;\n}\n\nvoid RenderGraph::launch_render_pass_with_dependencies(std::shared_ptr<RenderPass> render_pass, HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tif (render_pass == nullptr)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"The render pass \\\"%s\\\" wasn't added to the RenderGraph but appears as a dependency of another render pass!\", render_pass->get_name().c_str());\n\n\t\treturn;\n\t}\n\n\tif (m_render_pass_launched_this_frame_yet[render_pass.get()] == true)\n\t\t// This pass has already been 
launched\n\t\treturn;\n\n\t// Launching all the dependencies first\n\tfor (std::shared_ptr<RenderPass> dependency : render_pass->get_dependencies())\n\t\tlaunch_render_pass_with_dependencies(dependency, render_data, compiler_options);\n\n\t// Now launching the render pass itself since all dependencies have been launched\n\tbool effectively_launched = render_pass->launch_async(render_data, compiler_options);\n\tm_render_pass_launched_this_frame_yet[render_pass.get()] = true;\n\n\tif (effectively_launched)\n\t\t// Only setting the effectively launched to true if the render pass was launched\n\t\t// Otherwise, this leaves it at its current value\n\t\tm_render_pass_effectively_launched_this_frame[render_pass.get()] = true;\n}\n\nvoid RenderGraph::post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options)\n{\n\tfor (auto& name_to_render_pass : m_render_passes)\n\t\tname_to_render_pass.second->post_sample_update_async(render_data, compiler_options);\n}\n\nvoid RenderGraph::update_render_data()\n{\n\tfor (auto& render_pass : m_render_passes)\n\t\trender_pass.second->update_render_data();\n}\n\nvoid RenderGraph::reset(bool reset_by_camera_movement)\n{\n\tfor (auto& render_pass : m_render_passes)\n\t\trender_pass.second->reset(reset_by_camera_movement);\n}\n\nvoid RenderGraph::compute_render_times()\n{\n\tfor (auto& render_pass : m_render_passes)\n\t\tif (m_render_pass_effectively_launched_this_frame[render_pass.second.get()])\n\t\t\trender_pass.second->compute_render_times();\n}\n\nvoid RenderGraph::update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics)\n{\n\tfor (auto& render_pass : m_render_passes)\n\t\tif (m_render_pass_effectively_launched_this_frame[render_pass.second.get()])\n\t\t\trender_pass.second->update_perf_metrics(perf_metrics);\n}\n\nfloat RenderGraph::get_full_frame_time()\n{\n\tfloat frame_time_sum = 0.0f;\n\n\tfor (auto& render_pass : m_render_passes)\n\t\tif 
(m_render_pass_effectively_launched_this_frame[render_pass.second.get()])\n\t\t\tframe_time_sum += render_pass.second->get_full_frame_time();\n\n\treturn frame_time_sum;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> RenderGraph::get_all_kernels() \n{\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> out;\n\n\t// For all render passes\n\tfor (auto& render_pass : m_render_passes)\n\t\t// For all the kernels of this render pass\n\t\tfor (auto& name_to_kernel : render_pass.second->get_all_kernels())\n\t\t\tout[name_to_kernel.first] = name_to_kernel.second;\n\n\treturn out;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> RenderGraph::get_tracing_kernels()\n{\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> out;\n\n\t// For all render passes\n\tfor (auto& render_pass : m_render_passes)\n\t\t// For all the kernels of this render pass\n\t\tfor (auto& name_to_kernel : render_pass.second->get_tracing_kernels())\n\t\t\tout[name_to_kernel.first] = name_to_kernel.second;\n\n\treturn out;\n}\n\nvoid RenderGraph::add_render_pass(std::shared_ptr<RenderPass> render_pass)\n{\n\tif (m_render_passes.find(render_pass->get_name()) != m_render_passes.end())\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"A render pass with name %s already exists in the render graph. This call to add_render_pass() didn't change anything.\", render_pass->get_name().c_str());\n\n\t\treturn;\n\t}\n\n\tm_render_passes[render_pass->get_name()] = render_pass;\n}\n\nstd::shared_ptr<RenderPass> RenderGraph::get_render_pass(const std::string& render_pass_name)\n{\n\treturn m_render_passes[render_pass_name];\n}\n\nstd::unordered_map<std::string, std::shared_ptr<RenderPass>> RenderGraph::get_render_passes()\n{\n\treturn m_render_passes;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/RenderGraph.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_RENDER_GRAPH_H\n#define RENDERER_RENDER_GRAPH_H\n\n#include \"Renderer/RenderPasses/RenderPass.h\"\n\n#include <memory>\n#include <unordered_map>\n\nclass GPURenderer;\n\nclass RenderGraph : public RenderPass\n{\npublic:\n\tRenderGraph();\n\tRenderGraph(GPURenderer* renderer);\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvirtual void compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}) override;\n\tvirtual void recompile(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) override;\n\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) override;\n\n\tvirtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) override;\n\n\tvirtual void prepass() override;\n\tvirtual void update_is_render_pass_used();\n\tvirtual bool pre_render_update(float delta_time) override;\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) override;\n\n\tvirtual void update_render_data() override;\n\tvirtual void reset(bool reset_by_camera_movement) override;\n\t\n\tvirtual void compute_render_times() override;\n\tvirtual void update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics) override;\n\n\tvirtual float get_full_frame_time() override;\n\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_all_kernels() override;\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> 
get_tracing_kernels() override;\n\n\tvoid add_render_pass(std::shared_ptr<RenderPass> render_pass);\n\tstd::shared_ptr<RenderPass> get_render_pass(const std::string& render_pass_name);\n\tstd::unordered_map<std::string, std::shared_ptr<RenderPass>> get_render_passes();\n\nprivate:\n\t// Launches all the dependencies (recursively) of the given render pass and\n\t// then launches the given render pass.\n\tvoid launch_render_pass_with_dependencies(std::shared_ptr<RenderPass> render_pass, HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options);\n\n\t// Whether or not launch() has been called on a given render pass this frame.\n\t// This is used to know whether a render pass has already been launched this frame\n\tstd::unordered_map<RenderPass*, bool> m_render_pass_launched_this_frame_yet;\n\t// Whether or not launch(), called on a given render pass, returned true this frame\n\t// \n\t// Because calling launch() on a render pass may not *actually* launch the render pass on the GPU\n\t// (this can happen for example if a render pass is only being launched every N frames. Only\n\t// one out of N calls to launch() will actually launch the render pass on the GPU), we\n\t// will need to know when a render pass has effectively been launched because if it hasn't,\n\t// we can't get the render pass times for this render pass for example\n\tstd::unordered_map<RenderPass*, bool> m_render_pass_effectively_launched_this_frame;\n\n\t// Name --> RenderPass\n\t// The name is actually just render_pass.get_name()\n\tstd::unordered_map<std::string, std::shared_ptr<RenderPass>> m_render_passes;\n\n\t// Whether or not launch has already been called for this *frame* (not sample)\n\tbool m_new_frame = true;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RenderPasses/RenderPass.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/RenderPass.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n\nRenderPass::RenderPass() {}\nRenderPass::RenderPass(GPURenderer* renderer) : RenderPass(renderer, \"Unnamed render pass\") {}\nRenderPass::RenderPass(GPURenderer* renderer, const std::string& name) : m_renderer(renderer), m_name(name) {}\n\nvoid RenderPass::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n}\n\nvoid RenderPass::compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n\tif (!is_render_pass_used())\n\t\treturn;\n\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t\tThreadManager::start_thread(ThreadManager::COMPILE_KERNELS_THREAD_KEY, ThreadFunctions::compile_kernel, m_kernels[name_to_kernel.first], hiprt_orochi_ctx, std::ref(func_name_sets));\n}\n\nvoid RenderPass::recompile(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets, bool silent, bool use_cache)\n{\n\tif (!is_render_pass_used())\n\t\t// Not recompiling if the render pass is disabled / not being used\n\t\treturn;\n\n\t// The default implementation recompiles all the kernels returned by 'get_all_kernels()'\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t\tname_to_kernel.second->compile(hiprt_orochi_ctx, func_name_sets, use_cache, silent);\n}\n\nvoid RenderPass::compute_render_times()\n{\n\tif (!is_render_pass_used())\n\t\t// No times to compute if the render pass is disabled / not being used\n\t\treturn;\n\n\t// The default implementation iterates over all kernels and adds their time to the\n\t// render pass times of the renderer\n\tstd::unordered_map<std::string, float>& render_pass_times = m_renderer->get_render_pass_times();\n\tfor 
(auto& name_to_kernel : get_all_kernels())\n\t\trender_pass_times[name_to_kernel.first] = m_kernels[name_to_kernel.first]->compute_execution_time();\n}\n\nvoid RenderPass::update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics)\n{\n\tif (!is_render_pass_used())\n\t\t// No metrics to update if the render pass is disabled / not being used\n\t\treturn;\n\n\t// Add the render pass times computed by 'compute_render_times()' (which was called before\n\t// 'update_perf_metrics') into the performance metrics computer\n\tstd::unordered_map<std::string, float>& render_pass_times = m_renderer->get_render_pass_times();\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t\tperf_metrics->add_value(name_to_kernel.first, render_pass_times[name_to_kernel.first]);\n}\n\nfloat RenderPass::get_full_frame_time()\n{\n\tfloat sum = 0.0f;\n\n\tfor (auto& name_to_kernel : get_all_kernels())\n\t\tsum += name_to_kernel.second->get_last_execution_time();\n\n\treturn sum;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> RenderPass::get_all_kernels()\n{\n\t// The default implementation just returns all the kernels.\n\t\t// Or an empty map if the render pass isn't being used\n\n\tif (!is_render_pass_used())\n\t\treturn {};\n\telse\n\t\treturn m_kernels;\n}\n\nstd::map<std::string, std::shared_ptr<GPUKernel>> RenderPass::get_tracing_kernels()\n{\n\t// The default implementation just returns all the kernels (assumes that they are all tracing kernesl).\n\treturn get_all_kernels();\n}\n\nbool RenderPass::is_render_pass_used() const\n{\n\treturn true;\n}\n\nvoid RenderPass::set_is_render_pass_used(bool is_render_pass_used)\n{\n\tm_render_pass_used_this_frame = is_render_pass_used;\n}\n\nvoid RenderPass::add_dependency(std::shared_ptr<RenderPass> dependency)\n{\n\tm_dependencies.push_back(dependency);\n}\n\nstd::vector<std::shared_ptr<RenderPass>>& RenderPass::get_dependencies()\n{\n\treturn m_dependencies;\n}\n\nconst std::string& RenderPass::get_name()\n{\n\treturn 
m_name;\n}\n\nvoid RenderPass::set_name(const std::string& new_name)\n{\n\tm_name = new_name;\n}\n"
  },
  {
    "path": "src/Renderer/RenderPasses/RenderPass.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_RENDER_PASS_H\n#define RENDERER_RENDER_PASS_H\n\nclass GPURenderer;\n\n#include \"Compiler/GPUKernel.h\"\n#include \"HIPRT-Orochi/HIPRTOrochiCtx.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n\n#include <map>\n#include <memory>\n#include <string>\n#include <vector>\n\nclass GPURenderer;\nclass RenderWindow;\nstruct HIPRTRenderData;\n\n/**\n * Interface for a GPURenderer render pass\n */\nclass RenderPass\n{\npublic:\n\tRenderPass();\n\tRenderPass(GPURenderer* renderer);\n\tRenderPass(GPURenderer* renderer, const std::string& name);\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\t/**\n\t * This will be called once when the render pass is created.\n\t *\n\t * After this function is called, the render pass should be ready to be\n\t * launch()ed (pre_render_update() will be called before launch() though)\n\t *\n\t * This compile method will always be called on all render passes of a renderer.\n\t * It is the responsibility of the class overriding this method to compile the kernels if necessary or not.\n\t *\n\t * For example: if a ReSTIRDIRenderPass implements this interface but the renderer doesn't\n\t * actually use ReSTIR DI at the moment, then calling 'compile' should probably be a no-op (i.e. return directly),\n\t * otherwise, this would be compiling kernels unecessarily (since the render pass is not being used\n\t * \n\t * The kernels in this function may be compiled asynchronously by using the ThreadManager and launching threads\n\t * with the 'COMPILE_KERNELS_THREAD_KEY' key. Look at the ReSTIR DI render pass for some examples\n\t * \n\t * The default implementation does this and compiles all kernels found in the map returned by\n\t * 'get_all_kernels()'. This assumes that kernels are configured in the constructor \n\t * (given their options, file path, kernel function name ,...). 
Have a look at the GMoNRenderPass or ReSTIRDIRenderPass\n\t * for examples\n\t */\n\tvirtual void compile(std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {});\n\n\t/**\n\t * When some compiler options of the renderer have been changed and the render pass\n\t * needs to be recompiled\n\t *\n\t * Same remark here as for compile(): It is the responsibility of the class overriding this method\n\t * to compile the kernels if necessary or not.\n\t * \n\t * Recompilation of the kernels may *not* be asynchronous without the addition of a synchronization \n\t * elsewhere (to be sure that the kernels will be compiled before the next frame starts rendering).\n\t * This 'recompile' function is most likely to be called from the ImGui interface code and so \n\t * it must be blocking (or add synchronization elsewhere in the codebase) to be sure that \n\t * the kernels will be fully recompiled before the RenderWindow submits a new frame to the GPU\n\t */\n\tvirtual void recompile(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true);\n\n\t/**\n\t * That function is called when the host renderer is resized (i.e. when the user resizes the window)\n\t * \n\t * This function should be used to resize the buffers used by this \n\t * render pass if those buffers depend on the render resolution\n\t */\n\tvirtual void resize(unsigned int new_width, unsigned int new_height) = 0;\n\n\t/**\n\t * Function before 'pre_render_update()' that should compile kernels that haven't\n\t * been compiled so far if necessary\n\t * \n\t * For example, in a ReSTIR DI render pass, if the temporal reuse is disabled \n\t * when the application starts, the temporal reuse kernel will not be compiled \n\t * because it isn't needed. 
However, if the user then decides to enable temporal \n\t * reuse at runtime, the temporal reuse will now have to be compiled and this \n\t * function is in charge.\n\t * \n\t * Should return true if at least one kernel was compiled/recompiled, false otherwise\n\t */\n\tvirtual bool pre_render_compilation_check(std::shared_ptr<HIPRTOrochiCtx>& hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets = {}, bool silent = false, bool use_cache = true) { return false; }\n\n\t/**\n\t * This function is called everytime the renderer is reset.\n\t * \n\t * If accumulating, this function is going to be called everytime the renderer settings have changed and the accumulation is reset\n\t * If not accumulating, this is going to be called at every frame\n\t */\n\tvirtual void prepass() {}\n\n\t/**\n\t * Called at each frame, before launch()\n\t * \n\t * Buffer allocations / deallocations depending on whether or not this render pass\n\t * is necessary to the renderer can be done here\n\t * \n\t * 'delta_time' is the time in milliseconds that elapsed between two calls of this method\n\t * \n\t * This function should return true if the HIPRTRenderData structure of the renderer will have to\n\t * be set up again. 
This is typically the case when some buffers of the render pass have been allocated/deallocated/resized\n\t * and so we need to set the new buffer pointers in the HIPRTRenderData structure such that the GPU\n\t * uses the proper buffer pointers.\n\t * \n\t * Returns false otherwise\n\t */\n\tvirtual bool pre_render_update(float delta_time) = 0;\n\n\t/**\n\t * This function may be overriden by render passes that can be enabled/disabled at runtime.\n\t * \n\t * This is the case of ReSTIR render passes for example: the ReSTIR render passes are not always used for rendering.\n\t * \n\t * This function is called just before pre_render_update() and should return true if the render pass is going to be active\n\t * for the current frame.\n\t * Should return false if the render pass is not going to be active this frame.\n\t * \n\t * If the render pass is not active, launch() and post_render_update() will not be called after is_render_pass_used() is called.\n\t */\n\tvirtual bool is_render_pass_used() const;\n\t/**\n\t * Sets the 'm_render_pass_used_this_frame' boolean\n\t */\n\tvoid set_is_render_pass_used(bool is_render_pass_used);\n\n\t/**\n\t * This should launch the render pass kernels on the GPU\n\t * \n\t * Returns true if the render pass was indeed launched\n\t * Returns false otherwise (if the render pass isn't being used or if the render pass is only launched every frames or ...)\n\t * \n\t * !!!!!!!!!\n\t * Warning: The render_data parameter is a *copy* of the renderer's render data\n\t * \n\t * Any changes made to render_data from this function will not be reflected between each *frame*.\n\t * \n\t * This means that a change a made to render_data at *frame* 0 will not be seen at *frame* 1 by the render pass (even by the same render pass).\n\t * The changes can be seen between samples of the same frame but not between frames.\n\t * You can still modify render_data in this function to facilitate passing arguments to kernels but changes will not\n\t * be reflected 
in the next frame.\n\t * \n\t * The difference between frame and sample being that a frame can be composed of multiple samples, according to HIPRTRenderSettings::samples_per_frame\n\t * \n\t * If you need some persistent state accross frames, you'll have to keep member variables in your render pass\n\t * \n\t * Modifying m_renderer->get_render_data() from this function is a race concurrency with the asynchronous ImGui UI so care must be taken\n\t * with that.\n\t * \n\t * Same with the compiler_options. \n\t * You can use the 'compiler_options' parameter passed here to check for\n\t * options and decide what to launch / what not to launch but any changes made to the options\n\t * (not that you would ever need to change the compiler options from inside a RenderPass)\n\t * \n\t * Reading from m_renderer->get_global_compiler_options() instead of the 'compiler_options' parameter\n\t * is a race condition with the asynchronous ImGui UI.\n\t * !!!!!!!!!\n\t */\n\tvirtual bool launch_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) = 0;\n\n\t/**\n\t * Called once per sample, after launch()\n\t * \n\t * !!!!!!!!!\n\t * Warning: The render_data parameter is a *copy* of the renderer's render data\n\t * \n\t * Any changes made to render_data from this function will not be reflected between each *frame*.\n\t * \n\t * This means that a change a made to render_data at *frame* 0 will not be seen at *frame* 1 by the render pass (even by the same render pass).\n\t * The changes can be seen between samples of the same frame but not between frames.\n\t * You can still modify render_data in this function to facilitate passing arguments to kernels but changes will not\n\t * be reflected in the next frame.\n\t * \n\t * The difference between frame and sample being that a frame can be composed of multiple samples, according to HIPRTRenderSettings::samples_per_frame\n\t * \n\t * If you need some persistent state accross frames, you'll have to keep member 
variables in your render pass\n\t * \n\t * Modifying m_renderer->get_render_data() from this function is a race concurrency with the asynchronous ImGui UI so care must be taken\n\t * with that.\n\t * \n\t * Same with the compiler_options. \n\t * You can use the 'compiler_options' parameter passed here to check for\n\t * options and decide what to launch / what not to launch but any changes made to the options\n\t * (not that you would ever need to change the compiler options from inside a RenderPass)\n\t * \n\t * Reading from m_renderer->get_global_compiler_options() instead of the 'compiler_options' parameter\n\t * is a race condition with the asynchronous ImGui UI.\n\t * !!!!!!!!!\n\t */\n\tvirtual void post_sample_update_async(HIPRTRenderData& render_data, GPUKernelCompilerOptions& compiler_options) = 0;\n\n\t/**\n\t * This function is called when the renderer that holds this render pass needs to \n\t * update its render_data structure.\n\t * \n\t * For the most part, this function should modify m_renderer->get_render_data() to set\n\t * up the pointers / variables that will be used by the GPU in the shaders of the render pass\n\t * \n\t * the HIPRTRenderData data structure can be accessed with m_renderer->get_render_data() and it\n\t * can be modified directly\n\t */\n\tvirtual void update_render_data() = 0;\n\n\t/**\n\t * Called when the user resets the render (an option was changed in ImGui, the camera moved, ...)\n\t * \n\t * If 'reset_by_camera_movement' is true, this means that the user moved the camera.\n\t * This parameter can be used by the render pass to decide whether or not to reset the render pass. 
\n\t * \n\t * Some render passes may not want to reset depending on the state of the renderer\n\t * (we do not want to reset temporal buffers when moving the camera for temporal render passes (ReSTIR) for example)\n\t */\n\tvirtual void reset(bool reset_by_camera_movement) = 0;\n\n\t/**\n\t * This function is called once per frame, after all render passes have executed.\n\t * \n\t * This function should get a reference to the render pass times of the renderer:\n\t * std::unordered_map<std::string, float>& ms_time_per_pass = m_renderer->get_render_pass_times();\n\t * \n\t * and then the execution time of this render pass should be set in the 'ms_time_per_pass' map of the renderer.\n\t * \n\t * For example, for the light presampling pass of ReSTIR DI: \n\t * ms_time_per_pass[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID] = m_kernels[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID].compute_execution_time();\n\t * \n\t * The key used in the map can be arbitrary but should be unique. 
The practice used in this\n\t * codebase is to define the keys in the render pass itself as \"static const std::string\" and\n\t * use these keys to index the 'ms_time_per_pass' map.\n\t * \n\t * In the example above, the key is 'ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID'\n\t */\n\tvirtual void compute_render_times();\n\n\t/**\n\t * This function is called once per frame, after all render passes have executed.\n\t * \n\t * This function should add the render time of the pass to the performance metrics computer.\n\t * \n\t * For example:\n\t * std::unordered_map<std::string, float> render_pass_times = m_renderer->get_render_pass_times();\n\t * perf_metrics->add_value(ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID, render_pass_times[ReSTIRDIRenderPass::RESTIR_DI_LIGHTS_PRESAMPLING_KERNEL_ID]);\n\t * \n\t * The performance metrics computer is what stores the timings of all the render passes to display\n\t * the \"Performance metrics\" panel in ImGui\n\t * \n\t * It is unlikely that you need to override the default implementation if your 'get_all_kernels()' function\n\t * is properly written (i.e. only returns the kernels actually being used by the render pass)\n\t */\n\tvirtual void update_perf_metrics(std::shared_ptr<PerformanceMetricsComputer> perf_metrics);\n\n\t/**\n\t * Loops over all the kernels of this render pass and sums the kernel times of all the kernels and\n\t * returns the sum\n\t */\n\tvirtual float get_full_frame_time();\n\n\t/**\n\t * Returns a map of all the kernels of this render pass\n\t *\n\t * The map keys are the kernel name\n\t * The map values are the kernel themselves\n\t * \n\t * If this render pass isn't being used by the renderer \n\t * (for example a ReSTIR DI render pass whereas we're using RIS \n\t * for direct lighting at the first bounce, i.e. the ReSTIR DI render \n\t * pass is not in use), this function should return and empty map. 
\n\t * \n\t * This is such that ImGui doesn't display the GPU timings of this render pass.\n\t * \n\t * This function also should not return inactive kernels of a render pass if\n\t * the render pass has more than 1 kernel. For example, the ReSTIR DI render \n\t * pass has multiple kernels: spatio-temporal, spatial, temporal. \n\t * If spatiotemporal is being used, the spatial and temporal are not being used and \n\t * so they will not be in the map returned by this function. This is also to avoid \n\t * ImGui from displaying the performance metrics about kernels that are not in use \n\t * (and so we have no performance metrics on them)\n\t */\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_all_kernels();\n\n\t/**\n\t * Returns a map of all the kernels of the render pass that trace rays (shadow rays, bounce rays, ...)\n\t *\n\t * This is used in ImGui in the performance settings panel where we can adjust the\n\t * amount of shared memory used for the BVH traversal. Because this is only useful for\n\t * kernels that trace rays, we want a function that returns only the kernels that trace rays\n\t *\n\t * The map keys are the kernel name\n\t * The map values are the kernel themselves\n\t * \n\t * If this render pass isn't being used by the renderer \n\t * (for example a ReSTIR DI render pass whereas we're using RIS \n\t * for direct lighting at the first bounce, i.e. the ReSTIR DI render \n\t * pass is not in use), this function should return and empty map. \n\t * \n\t * This is such that ImGui doesn't display the GPU timings of this render pass.\n\t * \n\t * This function also should not return inactive kernels of a render pass if\n\t * the render pass has more than 1 kernel. For example, the ReSTIR DI render \n\t * pass has multiple kernels: spatio-temporal, spatial, temporal. \n\t * If spatiotemporal is being used, the spatial and temporal are not being used and \n\t * so they will not be in the map returned by this function. 
This is also to avoid \n\t * ImGui from displaying the performance metrics about kernels that are not in use \n\t * (and so we have no performance metrics on them)\n\t */\n\tvirtual std::map<std::string, std::shared_ptr<GPUKernel>> get_tracing_kernels();\n\n\t/**\n\t * Adds another render pass as a dependency of this render pass.\n\t * The dependency render pass will then always be executed before this render pass is executed\n\t */\n\tvoid add_dependency(std::shared_ptr<RenderPass> dependency);\n\n\t/**\n\t * Returns a list of all the dependencies so far added to this render pass\n\t */\n\tstd::vector<std::shared_ptr<RenderPass>>& get_dependencies();\n\n\tconst std::string& get_name();\n\tvoid set_name(const std::string& new_name);\n\nprotected:\n\t// This boolean is automatically set with the return of the function is_render_pass_used()\n\t// at the beginning of each frame\n\t//\n\t// This boolean should be used in launch() and post_sample_update() functions\n\t// in place of is_render_pass_used(). 
\n\t//\n\t// This is because \n\t//\t\t- launch() and post_sample_update() are asynchronous with the UI\n\t//\t\t- is_render_pass_used() tends to check for the renderer's global_kernel_options\n\t//\t\t\tto determine if a render pass should be active or not\n\t//\t\t- but that's a race condition because the global_kernel_options can be modified\n\t//\t\t\tby the ImGui UI while the render pass is running and so checking for the renderer kernel options\n\t//\t\t\tasynchronously can lead to undefined behavior with uninitialized buffers\n\t//\n\t//\t\t----> this boolean should be used instead as it defines whether the render pass is active or\n\t//\t\t\tnot for the whole frame\n\t//\n\t// is_render_pass_used() can be used safely at any time outside of the launch() and post_sample_update() functions\n\tbool m_render_pass_used_this_frame = true;\n\t\n\tstd::string m_name;\n\n\t// Access to the renderer that holds the render pass\n\tGPURenderer* m_renderer = nullptr;\n\t// Access to the render window that holds the renderer\n\tRenderWindow* m_render_window = nullptr;\n\n\t// Other render passes which this render pass depends on.\n\t// They will be launched before this render pass\n\tstd::vector<std::shared_ptr<RenderPass>> m_dependencies;\n\n\t// Name --> GPUKernel map\n\tstd::map<std::string, std::shared_ptr<GPUKernel>> m_kernels;\n};\n\n#endif"
  },
  {
    "path": "src/Renderer/RendererAnimationState.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_ANIMATION_STATE_H\n#define RENDERER_ANIMATION_STATE_H\n\n#include <filesystem>\n\nstruct RendererAnimationState\n{\n\t// If true, objects will be animated in the scene at each frame\n\tbool do_animations = false;\n\t// If true, then this means that the renderer is currently rendering a frame\n\t// sequence (typically an animation). This means, for example, that animations\n\t// will only step after the current frame has converged, etc...\n\tbool is_rendering_frame_sequence = false;\n\t// This boolean is read by the various components of the scene that can\n\t// be animated.\n\t// \n\t// If this boolean is true, the components are allowed to step their animation.\n\tbool can_step_animation = false;\n\n\t// How many frames have been rendered so far\n\tint frames_rendered_so_far = 0;\n\t// How many frames to render for the frame sequence\n\tint number_of_animation_frames = 100;\n\n\tstd::string frames_output_folder = \"FrameSequence\";\n\n\tstd::string get_frame_filepath()\n\t{\n\t\treturn frames_output_folder + \"/\" + std::to_string(frames_rendered_so_far) + \".png\";\n\t}\n\n\tvoid ensure_output_folder_exists()\n\t{\n\t\tif (!std::filesystem::exists(frames_output_folder)) \n\t\t\t// Creates the folder and any necessary parent directories if it doesn't exist yet\n        \tstd::filesystem::create_directories(frames_output_folder); \n\t}\n\n\tvoid reset()\n\t{\n\t\tframes_rendered_so_far = 0;\n\n\t\tstd::stringstream ss;\n\n\t\tss << \"FrameSeq - \";\n\t\tUtils::get_current_date_string(ss);\n\n\t\tframes_output_folder = ss.str();\n\t}\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/RendererEnvmap.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Image/Image.h\"\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RendererEnvmap.h\"\n\n#define GLM_ENABLE_EXPERIMENTAL\n#include \"glm/gtx/euler_angles.hpp\"\n\nvoid RendererEnvmap::init_from_image(const Image32Bit& image, const std::string& envmap_filepath)\n{\n\tm_envmap_data.pack_from(image);\n\tm_envmap_filepath = envmap_filepath;\n\n\tm_width = image.width;\n\tm_height = image.height;\n}\n\nvoid RendererEnvmap::update(GPURenderer* renderer, float delta_time)\n{\n\tdo_animation(renderer, delta_time);\n\n\t// Updates the data/pointers in WorldSettings that the shaders will use\n\tupdate_renderer(renderer);\n}\n\nvoid RendererEnvmap::recompute_sampling_data_structure(GPURenderer* renderer, const Image32Bit* image)\n{\n\tif (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_NO_SAMPLING)\n\t{\n\t\tif (m_cdf.size() > 0)\n\t\t\tm_cdf.free();\n\n\t\tif (m_alias_table_alias.size() > 0)\n\t\t\tm_alias_table_alias.free();\n\n\t\tif (m_alias_table_probas.size() > 0)\n\t\t\tm_alias_table_probas.free();\n\t}\n\telse if (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_BINARY_SEARCH)\n\t{\n\t\tif (m_alias_table_alias.size() > 0)\n\t\t\tm_alias_table_alias.free();\n\n\t\tif (m_alias_table_probas.size() > 0)\n\t\t\tm_alias_table_probas.free();\n\n\t\trecompute_CDF(image);\n\t}\n\telse if (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_ALIAS_TABLE)\n\t{\n\t\tif (m_cdf.size() > 0)\n\t\t\tm_cdf.free();\n\n\t\trecompute_alias_table(image);\n\t}\n}\n\nvoid RendererEnvmap::recompute_CDF(const Image32Bit* image)\n{\n\tstd::vector<float> cdf_data;\n\tif (image != nullptr)\n\t\tcdf_data = 
image->compute_cdf();\n\telse\n\t{\n\t\tif (m_envmap_filepath.ends_with(\".exr\"))\n\t\t\tcdf_data = Image32Bit::read_image_exr(m_envmap_filepath, true).compute_cdf();\n\t\telse\n\t\t\tcdf_data = Image32Bit::read_image_hdr(m_envmap_filepath, 4, true).compute_cdf();\n\t}\n\n\tm_cdf.resize(cdf_data.size());\n\tm_cdf.upload_data(cdf_data);\n\tm_luminance_total_sum = cdf_data.back();\n}\n\nvoid RendererEnvmap::recompute_alias_table(const Image32Bit* image)\n{\n\tstd::vector<float> probas;\n\tstd::vector<int> alias;\n\tif (image != nullptr)\n\t\timage->compute_alias_table(probas, alias, &m_luminance_total_sum);\n\telse\n\t{\n\t\tif (m_envmap_filepath.ends_with(\".exr\"))\n\t\t\tImage32Bit::read_image_exr(m_envmap_filepath, true).compute_alias_table(probas, alias, &m_luminance_total_sum);\n\t\telse\n\t\t\tImage32Bit::read_image_hdr(m_envmap_filepath, 4, true).compute_alias_table(probas, alias, &m_luminance_total_sum);\n\t}\n\n\tm_alias_table_probas.resize(probas.size());\n\tm_alias_table_probas.upload_data(probas);\n\tm_alias_table_alias.resize(alias.size());\n\tm_alias_table_alias.upload_data(alias);\n}\n\nRGBE9995Packed* RendererEnvmap::get_packed_data_pointer()\n{\n\treturn m_envmap_data.get_data_pointer();\n}\n\nvoid RendererEnvmap::get_alias_table_device_pointers(float*& out_probas_pointer, int*& out_alias_pointer)\n{\n\tout_probas_pointer = m_alias_table_probas.get_device_pointer();\n\tout_alias_pointer = m_alias_table_alias.get_device_pointer();\n}\n\nfloat* RendererEnvmap::get_cdf_device_pointer()\n{\n\treturn m_cdf.get_device_pointer();\n}\n\nunsigned int RendererEnvmap::get_width() { return m_width; }\nunsigned int RendererEnvmap::get_height() { return m_height; }\n\nfloat RendererEnvmap::get_sampling_structure_VRAM_usage() const\n{\n\t// Just return the sum of everything (both the CDF and alias table) because only one can be\n\t// used at a given time so one of the two will be 0 bytes anyways\n\treturn (m_cdf.get_byte_size() + 
m_alias_table_probas.get_byte_size() + m_alias_table_alias.get_byte_size()) / 1000000.0f;\n}\n\nvoid RendererEnvmap::do_animation(GPURenderer* renderer, float delta_time)\n{\n\t// We can step the animation either if we're not accumulating or\n\t// if we're accumulating and we're allowed to step the animations\n\tbool can_step_animation = false;\n\tcan_step_animation |= renderer->get_render_settings().accumulate && renderer->get_animation_state().can_step_animation;\n\tcan_step_animation |= !renderer->get_render_settings().accumulate;\n\n\tif (animate && renderer->get_animation_state().do_animations && can_step_animation)\n\t{\n\t\trotation_X += animation_speed_X / 360.0f / (1000.0f / delta_time);\n\t\trotation_Y += animation_speed_Y / 360.0f / (1000.0f / delta_time);\n\t\trotation_Z += animation_speed_Z / 360.0f / (1000.0f / delta_time);\n\n\t\trotation_X = rotation_X - static_cast<int>(rotation_X);\n\t\trotation_Y = rotation_Y - static_cast<int>(rotation_Y);\n\t\trotation_Z = rotation_Z - static_cast<int>(rotation_Z);\n\t}\n\n\tif (rotation_X != prev_rotation_X || rotation_Y != prev_rotation_Y || rotation_Z != prev_rotation_Z)\n\t{\n\t\tglm::mat3x3 rotation_matrix, rotation_matrix_inv;\n\n\t\t// glm::orientate3 interprets the X, Y and Z angles we give it as a yaw/pitch/roll semantic.\n\t\t// \n\t\t// The standard yaw/pitch/roll interpretation is:\n\t\t//\t- Yaw for rotation around Z\n\t\t//\t- Pitch for rotation around Y\n\t\t//\t- Roll for rotation around X\n\t\t// \n\t\t// but with a Z-up coordinate system. 
We want a Y-up coordinate system so\n\t\t// we want our Yaw to rotate around Y instead of Z (and our Pitch to rotate around Z).\n\t\t// \n\t\t// This means that we need to reverse Y and Z.\n\t\t// \n\t\t// See this picture for a visual aid on what we **don't** want (the z-up):\n\t\t// https://www.researchgate.net/figure/xyz-and-pitch-roll-and-yaw-systems_fig4_253569466\n\t\trotation_matrix = glm::orientate3(glm::vec3(rotation_X * M_TWO_PI, rotation_Z * M_TWO_PI, rotation_Y * M_TWO_PI));\n\t\trotation_matrix_inv = glm::inverse(rotation_matrix);\n\n\t\tenvmap_to_world_matrix = *reinterpret_cast<float3x3*>(&rotation_matrix);\n\t\tworld_to_envmap_matrix = *reinterpret_cast<float3x3*>(&rotation_matrix_inv);\n\n\t\tprev_rotation_X = rotation_X;\n\t\tprev_rotation_Y = rotation_Y;\n\t\tprev_rotation_Z = rotation_Z;\n\t}\n}\n\nvoid RendererEnvmap::update_renderer(GPURenderer* renderer)\n{\n\tWorldSettings& world_settings = renderer->get_world_settings();\n\n\tworld_settings.envmap_to_world_matrix = envmap_to_world_matrix;\n\tworld_settings.world_to_envmap_matrix = world_to_envmap_matrix;\n\n\tif (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_NO_SAMPLING)\n\t{\n\t\tworld_settings.envmap_cdf = nullptr;\n\n\t\tworld_settings.envmap_alias_table.alias_table_probas = nullptr;\n\t\tworld_settings.envmap_alias_table.alias_table_alias = nullptr;\n\t\tworld_settings.envmap_alias_table.size = 0;\n\t\tworld_settings.envmap_alias_table.sum_elements = 0;\n\t}\n\telse if (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_BINARY_SEARCH)\n\t{\n\t\tworld_settings.envmap_cdf = m_cdf.get_device_pointer();\n\t\tworld_settings.envmap_total_sum = m_luminance_total_sum;\n\n\t\tworld_settings.envmap_alias_table.alias_table_probas = nullptr;\n\t\tworld_settings.envmap_alias_table.alias_table_alias = nullptr;\n\t\tworld_settings.envmap_alias_table.size = 
0;\n\t\tworld_settings.envmap_alias_table.sum_elements = 0;\n\t}\n\telse if (renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) == ESS_ALIAS_TABLE)\n\t{\n\t\tworld_settings.envmap_cdf = nullptr;\n\t\tworld_settings.envmap_total_sum = m_luminance_total_sum;\n\n\t\tworld_settings.envmap_alias_table.alias_table_probas = m_alias_table_probas.get_device_pointer();\n\t\tworld_settings.envmap_alias_table.alias_table_alias = m_alias_table_alias.get_device_pointer();\n\t\tworld_settings.envmap_alias_table.size = m_alias_table_alias.size();\n\t\tworld_settings.envmap_alias_table.sum_elements = m_luminance_total_sum;\n\t}\n}\n"
  },
  {
    "path": "src/Renderer/RendererEnvmap.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDERER_ENVMAP_H\n#define RENDERER_ENVMAP_H\n\n#include \"Image/EnvmapRGBE9995.h\"\n\nclass GPURenderer;\n\nclass RendererEnvmap\n{\npublic:\n\tfloat rotation_X = 0.0f;\n\tfloat rotation_Y = 0.0f;\n\tfloat rotation_Z = 0.0f;\n\n\tbool animate = false;\n\t// How many degrees per second will the envmap rotate if 'animate' is true\n\tfloat animation_speed_X = 0.0f;\n\tfloat animation_speed_Y = 8.0f;\n\tfloat animation_speed_Z = 0.0f;\n\n\tfloat3x3 envmap_to_world_matrix;\n\tfloat3x3 world_to_envmap_matrix;\n\n\t/**\n\t * Uploads the data from 'image' to a texture on the GPU and remembers the path\n\t * of the envmap if needed later (when recomputing the CDF/alias table for example\n\t * since we do not keep the data of the envmap in memory (envmaps can be quite big), \n\t * we'll have to read it again from the disk)\n\t */\n\tvoid init_from_image(const Image32Bit& image, const std::string& envmap_filepath);\n\n\t/**\n\t * - Updates the animation of the envmap\n\t * - Recomputes the sampling data structure (CDF for binary search sampling, \n\t *\t\talias table for alias table sampling) if necessary\n\t */\n\tvoid update(GPURenderer* renderer, float delta_time);\n\n\t/**\n\t * Computes the CDF or alias table of the envmap based of the envmap sampling strategy used\n\t * by the renderer.\n\t * \n\t * The data structure that is unused will also be freed to free some VRAM.\n\t */\n\tvoid recompute_sampling_data_structure(GPURenderer* renderer, const Image32Bit* = nullptr);\n\n\tRGBE9995Packed* get_packed_data_pointer();\n\tvoid get_alias_table_device_pointers(float*& out_probas_pointer, int*& out_alias_pointer);\n\tfloat* get_cdf_device_pointer();\n\n\tunsigned int get_width();\n\tunsigned int get_height();\n\n\t/**\n\t * Returns the VRAM used by the sampling structure of the envmap in MB\n\t */\n\tfloat 
get_sampling_structure_VRAM_usage() const;\n\nprivate:\n\tvoid recompute_CDF(const Image32Bit* image);\n\tvoid recompute_alias_table(const Image32Bit* image);\n\n\t/**\n\t * Recomputes the envmap matrices if necessary based on\n\t * the current values of rotation_X, rotation_Y and rotation_Z\n\t * \n     * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n     * call to do_animation()\n\t */\n\tvoid do_animation(GPURenderer* renderer, float delta_time);\n\n\t/**\n\t * Updates the world settings, envmap itself, etc... of the renderer\n\t */\n\tvoid update_renderer(GPURenderer* renderer);\n\n\t// Cached values of rotation_X, Y, Z so that we don't recompute the matrices\n\t// if the rotations haven't changed\n\tfloat prev_rotation_X = -1.0f;\n\tfloat prev_rotation_Y = -1.0f;\n\tfloat prev_rotation_Z = -1.0f;\n\n\t// The envmap path is saved if we need to load the envmap data again\n\t// \n\t// This requires reading from the disk again but this saves memory because\n\t// we don't have to store the envmap, we can just read it from the disk again.\n\t// And envmaps are heavy so we're actually saving a lot of memory there\n\tstd::string m_envmap_filepath;\n\n\t// This object contains the memory data of the envmap\n\tRGBE9995Envmap<true> m_envmap_data;\n\tunsigned int m_width = 0;\n\tunsigned int m_height = 0;\n\n\t// CDF / Alias table for sampling the envmap\n\tOrochiBuffer<float> m_cdf;\n\tOrochiBuffer<float> m_alias_table_probas;\n\tOrochiBuffer<int> m_alias_table_alias;\n\tfloat m_luminance_total_sum = 0.0f;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Sphere.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef SPHERE_H\n#define SPHERE_H\n\n#include \"HostDeviceCommon/HitInfo.h\"\n#include <hiprt/hiprt_types.h> // for hiprtRay\n\nstruct Sphere\n{\n    Sphere(float3 center, float radius, int primitive_index) : center(center), radius(radius), primitive_index(primitive_index) { };\n\n    inline bool intersect(const hiprtRay &ray, HitInfo& hit_info) const\n    {\n        float3 L = ray.origin - center;\n\n        //dot(ray._direction, ray._direction) = 1 because direction is normalized\n        constexpr float a = 1.0f;\n        float b = 2.0f * hippt::dot(ray.direction, L);\n        float c = hippt::dot(L, L) - radius * radius;\n\n        float delta = b * b - 4.0f * a * c;\n        if (delta < 0.0f)\n            return false;\n        else\n        {\n            constexpr float a2 = 2.0f * a;\n\n            if (delta == 0.0f)\n                hit_info.t = -b / a2;\n            else\n            {\n                float sqrt_delta = std::sqrt(delta);\n\n                float t1 = (-b - sqrt_delta) / a2;\n                float t2 = (-b + sqrt_delta) / a2;\n\n                if (t1 < t2)\n                {\n                    hit_info.t = t1;\n                    if (hit_info.t < 0.0f)\n                        hit_info.t = t2;\n                }\n            }\n\n            if (hit_info.t < 0.0f)\n                return false;\n\n            hit_info.inter_point = ray.origin + ray.direction * hit_info.t;\n            hit_info.shading_normal = hippt::normalize(hit_info.inter_point - center);\n            hit_info.primitive_index = primitive_index;\n\n            return true;\n        }\n    }\n\n    float3 center;\n    float radius;\n\n    int primitive_index;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/StatusBuffersValues.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef STATUS_BUFFERS_VALUES_H\n#define STATUS_BUFFERS_VALUES_H\n\nstruct StatusBuffersValues\n{\n\t// Is there at least one pixel that is still active\n\t// (i.e. not all pixels have converged yet)\n\t// Initializing to true. Otherwise, the first call to rendering_done()\n\t// will return true and we will never get past the first frame\n\tbool one_ray_active = true;\n\n\t// How many pixels have converged in the image\n\t// (according to the adaptive sampling or the\n\t// pixel noise threshold for example)\n\tunsigned int pixel_converged_count = 0;\n};\n\n#endif\n"
  },
  {
    "path": "src/Renderer/Triangle.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/Triangle.h\"\n\nfloat3 Triangle::bbox_centroid() const\n{\n    return (hippt::min(m_a, hippt::min(m_b, m_c)) + hippt::max(m_a, hippt::max(m_b, m_c))) / 2;\n}\n\nfloat Triangle::area() const\n{\n    return hippt::length(hippt::cross(m_b - m_a, m_c - m_a)) / 2;\n}\n\nfloat3& Triangle::operator[] (int i)\n{\n    return *((&m_a) + i);\n}\n\nconst float3& Triangle::operator[] (int i) const\n{\n    return *((&m_a) + i);\n}\n"
  },
  {
    "path": "src/Renderer/Triangle.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef TRIANGLE_H\n#define TRIANGLE_H\n\n#include \"HostDeviceCommon/HitInfo.h\"\n#include <hiprt/hiprt_types.h> // for hiprtRay\n\nstruct Triangle\n{\n\tTriangle() {}\n\tTriangle(const float3& a, const float3& b, const float3& c) : m_a(a), m_b(b), m_c(c) {}\n\n\tfloat3 bbox_centroid() const;\n\n\t//From https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm\n\tinline bool intersect(const hiprtRay& ray, hiprtHit& hit_info) const\n\t{\n\t\tconst float EPSILON = 0.0000001f;\n\t\tfloat3 edge1, edge2, h, s, q;\n\t\tfloat a, f, u, v;\n\t\tedge1 = m_b - m_a;\n\t\tedge2 = m_c - m_a;\n\n\t\th = hippt::cross(ray.direction, edge2);\n\t\ta = hippt::dot(edge1, h);\n\n\t\tif (a > -EPSILON && a < EPSILON)\n\t\t\treturn false;    // This ray is parallel to this triangle.\n\n\t\tf = 1.0f / a;\n\t\ts = ray.origin - m_a;\n\t\tu = f * hippt::dot(s, h);\n\n\t\tif (u < 0.0f || u > 1.0f)\n\t\t\treturn false;\n\n\t\tq = hippt::cross(s, edge1);\n\t\tv = f * hippt::dot(ray.direction, q);\n\n\t\tif (v < 0.0f || u + v > 1.0f)\n\t\t\treturn false;\n\n\t\t// At this stage we can compute t to find out where the intersection point is on the line.\n\t\tfloat t = f * hippt::dot(edge2, q);\n\n\t\tif (t > EPSILON) // ray intersection\n\t\t{\n\t\t\thit_info.t = t;\n\t\t\thit_info.normal = hippt::normalize(hippt::cross(edge1, edge2));\n\n\t\t\thit_info.t = t;\n\n\t\t\thit_info.uv = make_float2(u, v);\n\n\t\t\treturn true;\n\t\t}\n\t\telse // This means that there is a line intersection but not a ray intersection.\n\t\t\treturn false;\n\t}\n\n    float area() const;\n\n\tfloat3& operator[] (int index);\n\tconst float3& operator[] (int index) const;\n\n\tfloat3 m_a = {0, 0, 0}, m_b = { 0, 0, 0 }, m_c = { 0, 0, 0 };\n};\n\n#endif\n"
  },
  {
    "path": "src/Scene/BoundingBox.h",
    "content": "#ifndef BOUNDING_BOX_H\n#define BOUNDING_BOX_H\n\n#include \"HostDeviceCommon/Math.h\"\n\n/**\n * Axis Aligned Bounding Box class\n */\nstruct BoundingBox\n{\n\tBoundingBox() {}\n\tBoundingBox(float3 mini, float3 maxi) : mini(mini), maxi(maxi) {}\n\n\t/**\n\t * Extends this bounding box with the given one\n\t */\n\tvoid extend(const BoundingBox& other)\n\t{\n\t\tmini = hippt::min(mini, other.mini);\n\t\tmaxi = hippt::max(maxi, other.maxi);\n\t}\n\n\t/**\n\t * Extends the bounding box with a vertex\n\t */\n\tvoid extend(float3 vertex)\n\t{\n\t\tmini = make_float3(hippt::min(mini.x, vertex.x), hippt::min(mini.y, vertex.y), hippt::min(mini.z, vertex.z));\n\t\tmaxi = make_float3(hippt::max(maxi.x, vertex.x), hippt::max(maxi.y, vertex.y), hippt::max(maxi.z, vertex.z));\n\t}\n\n\t/**\n\t * Returns the length of the longest extent of the bounding box\n\t */\n\tfloat get_max_extent() const\n\t{\n\t\treturn hippt::max(hippt::abs(mini.x - maxi.x), hippt::max(hippt::abs(mini.y -maxi.y), hippt::abs(mini.z - maxi.z)));\n\t}\n\n\t/**\n\t * Returns the length of the extent in the coordinate 'coord'\n\t * \n\t * X = 0, Y = 1, Z = 2\n\t */\n\tfloat get_extent(int coord) const\n\t{\n\t\treturn *(&maxi.x + coord) - *(&mini.x + coord);\n\t}\n\n\tfloat3 get_extents() const\n\t{\n\t\treturn make_float3(get_extent(0), get_extent(1), get_extent(2));\n\t}\n\n\tfloat3 get_center() const\n\t{\n\t\treturn (mini + maxi) * 0.5f;\n\t}\n\n\tfloat3 mini = { std::numeric_limits<float>::max(), std::numeric_limits<float>::max() , std::numeric_limits<float>::max() };\n\tfloat3 maxi = { -std::numeric_limits<float>::max(), -std::numeric_limits<float>::max() , -std::numeric_limits<float>::max() };\n};\n\n#endif"
  },
  {
    "path": "src/Scene/Camera.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Scene/Camera.h\"\n\nHIPRTCamera Camera::to_hiprt(int render_width, int render_height)\n{\n    HIPRTCamera hiprt_cam;\n\n    glm::mat4x4 view_matrix = get_view_matrix();\n    glm::mat4x4 view_matrix_inv = glm::inverse(view_matrix);\n    glm::mat4x4 projection_matrix_inv = glm::inverse(projection_matrix);\n\n    hiprt_cam.inverse_view = *reinterpret_cast<float4x4*>(&view_matrix_inv);\n    hiprt_cam.inverse_projection = *reinterpret_cast<float4x4*>(&projection_matrix_inv);\n\n    glm::mat4x4 view_projection = view_matrix * projection_matrix;\n    hiprt_cam.view_projection = *reinterpret_cast<float4x4*>(&view_projection);\n\n    glm::vec4 position_glm = glm::vec4(0, 0, 0, 1) * view_matrix_inv;\n    hiprt_cam.position = make_float3(position_glm.x, position_glm.y, position_glm.z);\n\n    hiprt_cam.vertical_fov = vertical_fov;\n    hiprt_cam.sensor_width = render_width;\n    hiprt_cam.sensor_height = render_height;\n\n    hiprt_cam.do_jittering = do_jittering;\n\n    return hiprt_cam;\n}\n\nglm::mat4x4 Camera::get_view_matrix() const\n{\n    // For our FPS camera, we want to translate first and then rotate\n    // (so that we rotate around the current position of the camera = FPS camera).\n    // \n    // Because our matrix multiplication in the shaders is by the right\n    // (we multiply point by the right: M * point), we want the translation matrix \n    // on the right on the view matrix construction. We're inverting (or conjugating) \n    // the translation (or the rotation) because we want to construct a \n    // world-to-view matrix so we effectively need to reverse the transformations. \n    // \n    // For example, if the world space position of the camera is (5, 0, 0), \n    // then its translation is (5, 0, 0), obviously. 
If we now multiply a point \n    // with world space coordinates (5, 0, 0) by the view matrix, we're supposed to\n    // get (0, 0, 0) for the point coordinates in view space since the point is at the \n    // camera's position. This is why we need to add (-5, 0, 0) to the point's position. \n    // We need to apply the inverse translation to bring from world space to view space. \n    // \n    // Same for the rotation\n    //\n    // We transpose the result because glm is column major in memory. We want row major.\n    // Our convention is right-multiplying row major matrix by a column vector/point (which is on the right)\n    glm::mat4x4 view_matrix = glm::transpose(glm::mat4_cast(glm::conjugate(glm::normalize(m_rotation))) * glm::translate(glm::mat4(1.0f), -m_translation));\n\n    return view_matrix;\n}\n\nvoid Camera::set_aspect(float new_aspect)\n{\n    aspect = new_aspect;\n\n    // Recomputing the projection matrix with the new aspect\n    projection_matrix = glm::perspective(vertical_fov, new_aspect, near_plane, far_plane);\n}\n\nvoid Camera::set_FOV_radians(float new_fov)\n{\n    vertical_fov = new_fov;\n\n    // Recomputing the projection matrix with the new FOV\n    projection_matrix = glm::perspective(new_fov, aspect, near_plane, far_plane);\n}\n\nvoid Camera::auto_adjust_speed(const BoundingBox& scene_bounding_box)\n{\n    if (scene_bounding_box.get_max_extent() > 1.0e35f)\n        // Probably an empty scene, we can't adjust the camera speed based on the scene\n        return;\n\n    camera_movement_speed = scene_bounding_box.get_max_extent() / Camera::SCENE_CROSS_TIME;\n}\n\nvoid Camera::translate(glm::vec3 translation_vec)\n{\n    m_translation = m_translation + translation_vec * glm::conjugate(m_rotation);\n}\n\nvoid Camera::translate(float3 translation_vec)\n{\n    translate(glm::vec3(translation_vec.x, translation_vec.y, translation_vec.z));\n}\n\nvoid Camera::zoom(float offset)\n{\n    glm::vec3 zoom_translation(0, 0, offset);\n    m_translation 
= m_translation + zoom_translation * glm::conjugate(m_rotation);\n}\n\n/**\n * Reference:\n * \n * https://stackoverflow.com/questions/12435671/quaternion-lookat-function\n */\nvoid Camera::look_at_object(const BoundingBox& object_bounding_box)\n{\n    float3 object_center = object_bounding_box.get_center();\n    float3 new_camera_position = object_center;\n    new_camera_position += object_bounding_box.get_max_extent() * make_float3(3.0f, 0.0f, 0.0f);\n    new_camera_position += object_bounding_box.get_max_extent() * make_float3(0.0f, 1.0f, 0.0f);\n\n    glm::vec3 new_position_glm = glm::vec3(new_camera_position.x, new_camera_position.y, new_camera_position.z);\n    glm::vec3 object_center_glm = glm::vec3(object_center.x, object_center.y, object_center.z);\n\n    m_translation = new_position_glm;\n\n    // Forward vector is looking away from the target since we're using\n    // the \"camera looking down -Z\" convention\n    glm::vec3 look_at_vector = glm::normalize(m_translation - object_center_glm);\n    glm::vec3 right_axis = glm::cross(glm::vec3(0.0f, 1.0f, 0.0f), look_at_vector);\n    glm::vec3 rotated_up_axis = glm::cross(look_at_vector, right_axis);\n\n    glm::mat3x3 rot_mat = glm::mat3x3(right_axis, rotated_up_axis, look_at_vector);\n    m_rotation = glm::quat(rot_mat);\n}\n\nvoid Camera::rotate(glm::vec3 rotation_angles_rad)\n{\n    glm::quat qx = glm::angleAxis(rotation_angles_rad.x, glm::vec3(1.0f, 0.0f, 0.0f));\n    glm::quat qy = glm::angleAxis(rotation_angles_rad.y, glm::vec3(0.0f, 1.0f, 0.0f));\n    glm::quat qz = glm::angleAxis(rotation_angles_rad.z, glm::vec3(0.0f, 0.0f, 1.0f));\n\n    glm::quat new_orientation = glm::normalize(qy * m_rotation * qx * qz);\n    m_rotation = new_orientation;\n}\n\nvoid Camera::rotate(float3 rotation_angles_rad)\n{\n    rotate(glm::vec3(rotation_angles_rad.x, rotation_angles_rad.y, rotation_angles_rad.z));\n}\n\nvoid Camera::rotate_around_point(const float3& point, const float3& angles_rad)\n{\n    glm::quat 
rotation_quat_x = glm::angleAxis(angles_rad.x, glm::vec3(1.0f, 0.0f, 0.0f));\n    glm::quat rotation_quat_y = glm::angleAxis(angles_rad.y, glm::vec3(0.0f, 1.0f, 0.0f));\n    glm::quat rotation_quat_z = glm::angleAxis(angles_rad.z, glm::vec3(0.0f, 0.0f, 1.0f));\n\n    glm::mat4x4 rotation_mat_x = glm::mat4_cast(rotation_quat_x);\n    glm::mat4x4 rotation_mat_y = glm::mat4_cast(rotation_quat_y);\n    glm::mat4x4 rotation_mat_z = glm::mat4_cast(rotation_quat_z);\n\n    glm::vec3 point_glm = glm::vec3(point.x, point.y, point.z);\n    glm::mat4x4 rotation_mat = rotation_mat_z * rotation_mat_y * rotation_mat_x;\n    m_translation = rotation_mat * glm::vec4(m_translation - point_glm, 1.0f);\n    m_translation += point_glm;\n\n    glm::mat4x4 rot_mat = glm::mat4_cast(m_rotation);\n    rot_mat = rotation_mat * rot_mat;\n    m_rotation = glm::quat(rot_mat);\n}\n\nvoid Camera::rotate_around_point(const glm::vec3& point, const float3& angles_rad)\n{\n    rotate_around_point(make_float3(point.x, point.y, point.z), angles_rad);\n}\n"
  },
  {
    "path": "src/Scene/Camera.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef CAMERA_H\n#define CAMERA_H\n\n#include \"HostDeviceCommon/HIPRTCamera.h\"\n#include \"Scene/BoundingBox.h\"\n\n#include \"glm/mat4x4.hpp\"\n#include \"glm/vec3.hpp\"\n#include \"glm/gtc/quaternion.hpp\"\n\n#define _USE_MATH_DEFINES\n#include <math.h>\n\nclass GPURenderer;\n\n/**\n * Camera class meant for being manipulated through used interactions\n * etc... (hence the attributes m_translation and m_rotation for example)\n * \n * The curated camera class that is meant for being used by the shaders is HIPRTCamera\n */\nstruct Camera\n{\n    // Variable used when calling Camera::auto_adjust_speed().\n    // This is a time in seconds that represents how long it will take for the camera\n    // to traverse the scene along its largest extent if the user holds the 'W' key for example.\n    //\n    // Note that this may be a little scuffed for scenes that are very elongated.\n    static constexpr float SCENE_CROSS_TIME = 5.0f;\n\n    HIPRTCamera to_hiprt(int render_width, int render_height);\n    glm::mat4x4 get_view_matrix() const;\n\n    void set_aspect(float new_aspect);\n\n    /**\n     * The given FOV must be in radians\n     */\n    void set_FOV_radians(float new_fov);\n\n    /**\n     * Adjusts the speed attributes of this camera so that the camera\n     */\n    void auto_adjust_speed(const BoundingBox& scene_bounding_box);\n\n    void translate(glm::vec3 translation_vec);\n    void translate(float3 translation_vec);\n\n    /**\n     * Basically a handy function for translating a certain distance in the direction\n     * the camera is looking at\n     */\n    void zoom(float offset);\n\n    void look_at_object(const BoundingBox& object_bounding_box);\n\n    void rotate(glm::vec3 rotation_angles);\n    void rotate(float3 rotation_angles);\n    \n    void rotate_around_point(const float3& point, const float3& 
angles_rad);\n    void rotate_around_point(const glm::vec3& point, const float3& angles_rad);\n\n    glm::mat4x4 projection_matrix;\n\n    // Whether or not to jitter rays direction for anti-aliasing during the rendering\n    bool do_jittering = true;\n\n    // Vertical FOV in radians\n    float vertical_fov = M_PI * 0.5f;\n    float near_plane = 0.1f;\n    float far_plane = 1000.0f;\n    // Aspect ratio\n    float aspect = 16.0f / 9.0f;\n\n    // Camera movement speed. In world unit per second\n    float camera_movement_speed = 1.0f;\n    // Multiplier on the camera speed that the user can manipulate through the UI\n    float user_movement_speed_multiplier = 1.0f;\n\n    glm::vec3 m_translation = glm::vec3(0.0f);\n    glm::quat m_rotation = glm::quat(glm::vec3(0.0f));\n};\n\n#endif\n"
  },
  {
    "path": "src/Scene/CameraAnimation.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/GPURenderer.h\"\n#include \"Scene/Camera.h\"\n#include \"Scene/CameraAnimation.h\"\n\nvoid CameraAnimation::set_camera(Camera* camera)\n{\n    m_camera = camera;\n}\n\nvoid CameraAnimation::animation_step(GPURenderer* renderer, float delta_time)\n{\n    // We can step the animation either if we're not accumulating or\n    // if we're accumulating and we're allowed to step the animations\n    bool can_step_animation = false;\n    can_step_animation |= renderer->get_render_settings().accumulate && renderer->get_animation_state().can_step_animation;\n    can_step_animation |= !renderer->get_render_settings().accumulate;\n\n    if (animate && renderer->get_animation_state().do_animations && can_step_animation)\n    {\n        do_rotation_animation(delta_time);\n    }\n}\n\nvoid CameraAnimation::do_rotation_animation(float delta_time)\n{\n    if (m_do_rotation_animation)\n    {\n        float rotation_angle_y_deg = 0.0f;\n\n        // Modifying the camera's properties based on the chosen rotation type\n        switch (m_rotation_type)\n        {\n            case SECONDS_PER_ROTATION:\n                // Converting 'm_rotation_value' so that the camera\n                // rotates at such a speed that it will rotate 360.0f\n                // degrees in 'm_rotation_value' seconds\n                rotation_angle_y_deg = 360.0f / m_rotation_value * (delta_time / 1000.0f);\n                break;\n\n            case DEGREES_PER_FRAME:\n                // For 'DEGREES_PER_FRAME', 'm_rotation_value' is already\n                // in degrees so we can just use that value\n                rotation_angle_y_deg = m_rotation_value;\n                break;\n\n            default:\n                break;\n        }\n\n        float rotation_angle_y_rad = rotation_angle_y_deg / 180.0f * M_PI;\n\n        
m_camera->rotate_around_point(m_rotate_around_point, make_float3(0.0f, rotation_angle_y_rad, 0.0f));\n    }\n}\n"
  },
  {
    "path": "src/Scene/CameraAnimation.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef CAMERA_ANIMATION_H\n#define CAMERA_ANIMATION_H\n\n#include \"Scene/CameraRotationType.h\"\n\n#include \"glm/vec3.hpp\"\n\nclass Camera;\nclass GPURenderer;\n\nclass CameraAnimation\n{\npublic:\n    void set_camera(Camera* camera);\n\n    /**\n    * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n\t* call to animation_step()\n    */\n    void animation_step(GPURenderer* renderer, float delta_time);\n    /**\n    * The 'delta_time' parameter should be how much time passed, in milliseconds, since the last\n    * call to do_rotation_animation()\n    */\n    void do_rotation_animation(float delta_time);\n\n    // Public attributes here because we want them to be\n    // easily accessible and having to use getter/setters\n    // everywhere is a pain in the butt\n    bool animate = false;\n\n    // If true, the camera will rotate around 'm_rotate_around_point'\n    // with 'm_rotation_duration' as the speed target when 'animate' is\n    // set to true\n    bool m_do_rotation_animation = false;\n    CameraRotationType m_rotation_type = CameraRotationType::SECONDS_PER_ROTATION;\n\n    glm::vec3 m_rotate_around_point = glm::vec3(0.0f, 0.0f, 0.0f);\n    // Rotation speed in number of rotations around the object per second\n    float m_rotation_value = 8.0f;\n\nprivate:\n    Camera* m_camera = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/Scene/CameraRotationType.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef CAMERA_ROTATION_TYPE_H\n#define CAMERA_ROTATION_TYPE_H\n\nenum CameraRotationType\n{\n\tSECONDS_PER_ROTATION,\n\tDEGREES_PER_FRAME\n};\n\n#endif"
  },
  {
    "path": "src/Scene/SceneParser.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Image/Image.h\"\n#include \"Scene/SceneParser.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"Threads/ThreadState.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/CommandlineArguments.h\"\n\n#define GLM_ENABLE_EXPERIMENTAL\n#include \"glm/gtx/matrix_decompose.hpp\"\n\n#include <chrono>\n#include <memory>\n\nextern ImGuiLogger g_imgui_logger;\n\nvoid SceneParser::parse_scene_file(std::string scene_filepath, Assimp::Importer& assimp_importer, Scene& parsed_scene, SceneParserOptions& options)\n{\n    const aiScene* scene;\n    scene = assimp_importer.ReadFile(scene_filepath, aiPostProcessSteps::aiProcess_PreTransformVertices | aiPostProcessSteps::aiProcess_Triangulate | aiPostProcessSteps::aiProcess_GenBoundingBoxes);\n    if (scene == nullptr)\n    {\n        std::cerr << assimp_importer.GetErrorString() << std::endl;\n        std::string message = \"Falling back to default scene...: \" + std::string(CommandlineArguments::DEFAULT_SCENE);\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"%s\", message.c_str());\n\n        scene = assimp_importer.ReadFile(CommandlineArguments::DEFAULT_SCENE, aiPostProcessSteps::aiProcess_PreTransformVertices | aiPostProcessSteps::aiProcess_Triangulate);\n        scene_filepath = CommandlineArguments::DEFAULT_SCENE;\n\n        if (scene == nullptr)\n        {\n            // Couldn't even load the default scene either\n\n            g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Couldn't load the default scene either... 
Aborting\");\n\n            int charac = std::getchar();\n            std::exit(1);\n        }\n    }\n\n    if (scene->mNumMaterials > NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"This scene contains too many materials for the renderer. Maximum number of material is: %d\", NestedDielectricsInteriorStack::MAX_MATERIAL_INDEX);\n\n        int charac = std::getchar();\n\n        std::exit(1);\n    }\n\n    std::vector<std::pair<aiTextureType, std::string>> texture_paths;\n    // Indices of the texture used by a material\n    std::vector<ParsedMaterialTextureIndices> material_texture_indices;\n    // Index of the material associated with the texutre\n    std::vector<int> material_indices;\n    // How many textures are used per mesh. This is used later when parsing the geometry\n    std::vector<int> texture_per_mesh;\n    // By how much to offset the indices of the textures used by a material.\n    // For example, if there are 5 materials in the scene that all use a different base color\n    // texture, after the call to prepare_textures(), they will all have 0 as the index of their\n    // base color texture. This is obviously wrong and it should be 0, 1, 2, 3, 4 for\n    // each material since they use their own texture. This is what this vector is for, it contains\n    // the offsets that are going to be used so that each material has proper texture indices\n    std::vector<int> texture_indices_offsets;\n    int texture_count;\n\n    // We expect one material per mesh. 
It can happen that mNumMaterials is > mNumMeshes\n    // which means that there is a material that is not used in the scene then we don't\n    // want to process that material so we're then only interested in the mNumMeshes meshes\n    // that do have a material\n    int num_materials = std::min(scene->mNumMeshes, scene->mNumMaterials);\n    prepare_textures(scene, texture_paths, material_texture_indices, material_indices, texture_per_mesh, texture_indices_offsets, texture_count);\n    parsed_scene.materials.resize(num_materials);\n    // Default value of 1 so that materials that don't have a base color texture have their \"texture\" considered has opaque\n    parsed_scene.material_has_opaque_base_color_texture.resize(num_materials, 1);\n    parsed_scene.metadata.material_names.resize(num_materials);\n    parsed_scene.metadata.mesh_names.resize(scene->mNumMeshes);\n    parsed_scene.metadata.mesh_material_indices.resize(scene->mNumMeshes);\n    parsed_scene.textures.resize(texture_count);\n    assign_material_texture_indices(parsed_scene.materials, material_texture_indices, texture_indices_offsets);\n    dispatch_texture_loading(parsed_scene, scene_filepath, options.nb_texture_threads, texture_paths, material_indices);\n\n    parse_camera(scene, parsed_scene, options.override_aspect_ratio);\n\n    // Used to quickly check whether we've already seen a material based on its\n    // index (because multiple meshes may share the same material, we don't want\n    // to duplicate that material in our material array, we want to use only one).\n    // If we've already seen that material, then there's nothing to do and we can\n    // ignore the material of the mesh being processed since we've already added it\n    // to our materials buffer\n    std::unordered_set<int> material_indices_already_seen;\n\n    // If the scene contains multiple meshes, each mesh will have\n    // its vertices indices starting at 0. 
We don't want that.\n    // We want indices to be continuously growing (because we don't want\n    // the second mesh (with indices starting at 0, i.e its own indices) to use\n    // the vertices of the first mesh that have been parsed (and that use indices 0!)\n    // The offset thus offsets the indices of the meshes that come after the first one\n    // to account for all the indices of the previously parsed meshes\n    int global_indices_offset = 0;\n    for (int mesh_index = 0; mesh_index < scene->mNumMeshes; mesh_index++)\n    {\n        aiMesh* mesh = scene->mMeshes[mesh_index];\n        int material_index = mesh->mMaterialIndex;\n        aiMaterial* mesh_material = scene->mMaterials[material_index];\n        \n\n        std::string material_name = std::string(mesh_material->GetName().C_Str());\n        std::string mesh_name = std::string(mesh->mName.C_Str());\n        if (material_name == \"\")\n            material_name = std::string(\"Material.\") + std::to_string(material_index);\n        parsed_scene.metadata.material_names[material_index] = material_name;\n        parsed_scene.metadata.mesh_names[mesh_index] = mesh_name;\n        parsed_scene.metadata.mesh_material_indices[mesh_index] = material_index;\n\n        CPUMaterial& renderer_material = parsed_scene.materials[material_index];\n        if (material_indices_already_seen.find(mesh->mMaterialIndex) == material_indices_already_seen.end())\n        {\n            // If we haven't seen that material before\n\n            read_material_properties(mesh_material, renderer_material);\n            material_indices_already_seen.insert(mesh->mMaterialIndex);\n        }\n\n\n        // Inserting the normals if present\n        if (mesh->HasNormals())\n            parsed_scene.vertex_normals.insert(parsed_scene.vertex_normals.end(),\n                reinterpret_cast<float3*>(mesh->mNormals),\n                reinterpret_cast<float3*>(&mesh->mNormals[mesh->mNumVertices]));\n        else\n            
parsed_scene.vertex_normals.insert(parsed_scene.vertex_normals.end(), mesh->mNumVertices, hiprtFloat3{0, 0, 0});\n\n        // Inserting texcoords if present, looking at set 0 because that's where \"classical\" texcoords are.\n        // Other sets are assumed not interesting here.\n        if (mesh->HasTextureCoords(0) && texture_per_mesh[material_index] > 0)\n            for (int i = 0; i < mesh->mNumVertices; i++)\n                parsed_scene.texcoords.push_back(make_float2(mesh->mTextureCoords[0][i].x, mesh->mTextureCoords[0][i].y));\n        else\n            parsed_scene.texcoords.insert(parsed_scene.texcoords.end(), mesh->mNumVertices, float2{0.0f, 0.0f});\n\n        // Inserting 0 or 1 depending on whether the normals are present or not.\n        // These values will be used in the shader to determine whether we should do\n        // smooth shading or not\n        parsed_scene.has_vertex_normals.insert(parsed_scene.has_vertex_normals.end(), mesh->mNumVertices, mesh->HasNormals());\n\n        // Inserting all the vertices of the mesh\n        parsed_scene.vertices_positions.insert(parsed_scene.vertices_positions.end(), reinterpret_cast<hiprtFloat3*>(&mesh->mVertices[0]), reinterpret_cast<hiprtFloat3*>(&mesh->mVertices[mesh->mNumVertices]));\n\n        int max_mesh_index_offset = 0;\n        for (int face_index = 0; face_index < mesh->mNumFaces; face_index++)\n        {\n            aiFace face = mesh->mFaces[face_index];\n\n            int index_1 = face.mIndices[0];\n            int index_2 = face.mIndices[1];\n            int index_3 = face.mIndices[2];\n\n            // Accumulating the maximum index of this mesh, this is to know\n            max_mesh_index_offset = std::max(max_mesh_index_offset, std::max(index_1, std::max(index_2, index_3)));\n\n            parsed_scene.triangles_vertex_indices.push_back(index_1 + global_indices_offset);\n            parsed_scene.triangles_vertex_indices.push_back(index_2 + global_indices_offset);\n            
parsed_scene.triangles_vertex_indices.push_back(index_3 + global_indices_offset);\n        }\n\n        // We're pushing the same material index for all the faces of this mesh\n        // because all faces of a mesh have the same material (that's how ASSIMP assimp_importer's\n        // do things internally). An ASSIMP mesh is basically a set of faces that all have the\n        // same material.\n        // If you're importing the 3D model of a car, even though you probably think of it as only one \"3D mesh\",\n        // ASSIMP sees it as composed of as many meshes as there are different materials\n        parsed_scene.material_indices.insert(parsed_scene.material_indices.end(), mesh->mNumFaces, material_index);\n\n        // Adding the bounding box to the parsed scene\n        aiAABB mesh_aabb = mesh->mAABB;\n        BoundingBox mesh_bounding_box;\n        mesh_bounding_box.mini = make_float3(mesh_aabb.mMin.x, mesh_aabb.mMin.y, mesh_aabb.mMin.z);\n        mesh_bounding_box.maxi = make_float3(mesh_aabb.mMax.x, mesh_aabb.mMax.y, mesh_aabb.mMax.z);\n        if (mesh_bounding_box.get_max_extent() == 0.0f)\n        {\n            // I've had cases where the bounding box given by ASSIMP was (0, 0, 0), (0, 0, 0).\n            // Don't know why\n            //\n            // To avoid this weird, we fall back to manual computation of the bounding box\n\n            // Resetting the bounding because we just set its min and max to (0, 0, 0) and (0, 0, 0)\n            // because of the situation we're in\n            mesh_bounding_box = BoundingBox();\n            for (int vert_index = 0; vert_index < mesh->mNumVertices; vert_index++)\n                mesh_bounding_box.extend(*(float3*)(&mesh->mVertices[vert_index]));\n        }\n\n        parsed_scene.metadata.mesh_bounding_boxes.push_back(mesh_bounding_box);\n        // Extending the bounding box of the scene with the bounding box of the mesh\n        parsed_scene.metadata.scene_bounding_box.extend(mesh_bounding_box);\n\n 
       // If the max index of the mesh was 19, we want the next to start\n        // at 20, not 19, so we ++\n        max_mesh_index_offset++;\n        // Adding the maximum index of the mesh to our global indices offset \n        global_indices_offset += max_mesh_index_offset;\n    }\n\n    // Adjusting the speed of the camera so that we can cross the scene in approximately Camera::SCENE_CROSS_TIME\n    parsed_scene.camera.auto_adjust_speed(parsed_scene.metadata.scene_bounding_box);\n\n    // We need to process the emissive triangles in a separate pass because:\n    //  - Some meshes may be using emissive textures\n    //  - Some of these textures may be constant textures (which that the whole texture is only one color)\n    //      These textures will actually not be loaded but rather the unique color of the texture\n    //      will be set as the emission of the material\n    //      Because this is all done on a separate thread asynchronously, we need to wait for that thread\n    //      to finish. 
That thread we're talking about is the thread that is loading the texture so we're adding\n    //      another thread which has a dependency on the texture loading thread.\n    // This new thread will process the triangles of the scene and mark them as emissive and we can now use\n    // the information of the potential constant-emission textures\n    ThreadManager::add_dependency(ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES, ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY);\n    ThreadManager::start_thread(ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES, ThreadFunctions::load_scene_parse_emissive_triangles, scene, std::ref(parsed_scene));\n    ThreadManager::start_thread(ThreadManager::SCENE_LOADING_COMPUTE_TRIANGLE_AREAS, ThreadFunctions::load_scene_compute_triangle_areas, std::ref(parsed_scene));\n}\n\nvoid SceneParser::parse_camera(const aiScene* scene, Scene& parsed_scene, float frame_aspect_override)\n{\n    // Taking the first camera as the camera of the scene\n    if (scene->mNumCameras > 0)\n    {\n        aiCamera* camera = scene->mCameras[0];\n\n        glm::vec3 camera_position = *reinterpret_cast<glm::vec3*>(&camera->mPosition);\n        glm::vec3 camera_lookat = *reinterpret_cast<glm::vec3*>(&camera->mLookAt);\n        glm::vec3 camera_up = *reinterpret_cast<glm::vec3*>(&camera->mUp);\n\n        // Inversing the lookat because glm::lookat creates a world->view matrix which means\n        // that the position of the camera in world->view matrix is going to be '-true_position'\n        // \n        // Same for the other properties of the matrix\n        glm::mat4x4 lookat = glm::inverse(glm::lookAt(camera_position, camera_lookat, camera_up));\n\n        glm::vec3 scale, skew, translation;\n        glm::vec4 perspective;\n        glm::quat orientation;\n        glm::decompose(lookat, scale, orientation, translation, skew, perspective);\n\n        parsed_scene.camera.m_translation = translation;\n        parsed_scene.camera.m_rotation = 
orientation;\n\n        float aspect_ratio = frame_aspect_override == -1 ? camera->mAspect : frame_aspect_override;\n        float vertical_fov = 2.0f * std::atan(std::tan(camera->mHorizontalFOV * 0.5f) * aspect_ratio) + 0.425f;\n        parsed_scene.camera.projection_matrix = glm::perspective(vertical_fov, aspect_ratio, camera->mClipPlaneNear, camera->mClipPlaneFar);\n        parsed_scene.camera.vertical_fov = vertical_fov;\n\n        // Custom clip planes distances are not supported by the renderer so hardcoding to 0.1f and 100.0f\n        // instead of reading from the camera properties\n        parsed_scene.camera.near_plane = 0.1f;// camera->mClipPlaneNear;\n        parsed_scene.camera.far_plane = 100.0f;// camera->mClipPlaneFar;\n    }\n    else\n    {\n        // Creating a default camera because the scene doesn't have one\n\n        glm::mat4x4 lookat = glm::inverse(glm::lookAt(glm::vec3(0, 0, 0), glm::vec3(0, 0, -1), glm::vec3(0, 1, 0)));\n\n        glm::vec3 scale, skew, translation;\n        glm::vec4 perspective;\n        glm::quat orientation;\n        glm::decompose(lookat, scale, orientation, translation, skew, perspective);\n\n        parsed_scene.camera.m_translation = translation;\n        parsed_scene.camera.m_rotation = orientation;\n\n        float aspect_ratio = 1280.0f / 720.0f;\n        float horizontal_fov = 40.0f / 180 * M_PI;\n        float vertical_fov = 2.0f * std::atan(std::tan(horizontal_fov * 0.5f) * aspect_ratio) + 0.425f;\n        parsed_scene.camera.projection_matrix = glm::perspective(vertical_fov, aspect_ratio, 0.1f, 100.0f);\n        parsed_scene.camera.vertical_fov = vertical_fov;\n        parsed_scene.camera.near_plane = 0.1f;\n        parsed_scene.camera.far_plane = 100.0f;\n    }\n}\n\nvoid SceneParser::prepare_textures(const aiScene* scene, std::vector<std::pair<aiTextureType, std::string>>& texture_paths, std::vector<ParsedMaterialTextureIndices>& material_texture_indices, std::vector<int>& material_indices, 
std::vector<int>& texture_per_mesh, std::vector<int>& texture_indices_offsets, int& texture_count)\n{\n    int global_texture_index_offset = 0;\n\n    // We expect one material per mesh. It can happen that mNumMaterials is > mNumMeshes\n    // which means that there is a material that is not used in the scene then we don't\n    // want to process that material so we're then only interested in the mNumMeshes meshes\n    // that do have a material\n    for (int material_index = 0; material_index < std::min(scene->mNumMeshes, scene->mNumMaterials); material_index++)\n    {\n        aiMaterial* mesh_material = scene->mMaterials[material_index];\n        ParsedMaterialTextureIndices tex_indices;\n\n        // Reading the paths of the textures of the mesh\n        std::vector<std::pair<aiTextureType, std::string>> mesh_texture_paths;\n        mesh_texture_paths = get_textures_paths_and_indices(mesh_material, tex_indices);\n        mesh_texture_paths = normalize_texture_paths(mesh_texture_paths);\n\n        int mesh_texture_count = mesh_texture_paths.size();\n\n        material_indices.insert(material_indices.end(), mesh_texture_count, material_index);\n        material_texture_indices.push_back(tex_indices);\n        texture_paths.insert(texture_paths.end(), mesh_texture_paths.begin(), mesh_texture_paths.end());\n        texture_per_mesh.push_back(mesh_texture_count);\n        texture_indices_offsets.push_back(global_texture_index_offset);\n\n        global_texture_index_offset += mesh_texture_count;\n    }\n\n    texture_count = texture_paths.size();\n}\n\nvoid SceneParser::assign_material_texture_indices(std::vector<CPUMaterial>& materials, const std::vector<ParsedMaterialTextureIndices>& material_tex_indices, const std::vector<int>& material_textures_offsets)\n{\n    for (int material_index = 0; material_index < material_tex_indices.size(); material_index++)\n    {\n        ParsedMaterialTextureIndices mat_tex_indices = material_tex_indices[material_index];\n        
CPUMaterial& renderer_material = materials[material_index];\n        int tex_index_offset = material_textures_offsets[material_index];\n\n        // Assigning\n        renderer_material.base_color_texture_index = mat_tex_indices.base_color_texture_index;\n        renderer_material.emission_texture_index = mat_tex_indices.emission_texture_index;\n        renderer_material.roughness_texture_index = mat_tex_indices.roughness_texture_index;\n        renderer_material.metallic_texture_index = mat_tex_indices.metallic_texture_index;\n        renderer_material.roughness_metallic_texture_index = mat_tex_indices.roughness_metallic_texture_index;\n        renderer_material.specular_texture_index = mat_tex_indices.specular_texture_index;\n        renderer_material.coat_texture_index = mat_tex_indices.coat_texture_index;\n        renderer_material.sheen_texture_index = mat_tex_indices.sheen_texture_index;\n        renderer_material.specular_transmission_texture_index = mat_tex_indices.specular_transmission_texture_index;\n        renderer_material.normal_map_texture_index = mat_tex_indices.normal_map_texture_index;\n\n        // Offsetting\n        renderer_material.base_color_texture_index += renderer_material.base_color_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.emission_texture_index += renderer_material.emission_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.roughness_texture_index += renderer_material.roughness_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.metallic_texture_index += renderer_material.metallic_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.roughness_metallic_texture_index += renderer_material.roughness_metallic_texture_index == MaterialConstants::NO_TEXTURE ? 
0 : tex_index_offset;\n        renderer_material.specular_texture_index += renderer_material.specular_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.coat_texture_index += renderer_material.coat_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.sheen_texture_index += renderer_material.sheen_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.specular_transmission_texture_index += renderer_material.specular_transmission_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n        renderer_material.normal_map_texture_index += renderer_material.normal_map_texture_index == MaterialConstants::NO_TEXTURE ? 0 : tex_index_offset;\n\n        auto check_max_texture_count = [](int index, int max_index) \n        {\n            if (index > max_index && (index != MaterialConstants::NO_TEXTURE && index != MaterialConstants::CONSTANT_EMISSIVE_TEXTURE))\n            {\n                g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"A scene cannot contain more than 65535 different textures. 
Aborting...\");\n\n                std::exit(1);\n            }\n        };\n\n        check_max_texture_count(renderer_material.base_color_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.emission_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.roughness_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.metallic_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.roughness_metallic_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.specular_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.coat_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.sheen_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.specular_transmission_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n        check_max_texture_count(renderer_material.normal_map_texture_index, MaterialConstants::MAX_TEXTURE_COUNT);\n    }\n}\n\nvoid SceneParser::dispatch_texture_loading(Scene& parsed_scene, const std::string& scene_path, int nb_threads, const std::vector<std::pair<aiTextureType, std::string>>& texture_paths, const std::vector<int>& material_indices)\n{\n    if (nb_threads == -1)\n        // As many threads as there are textures if -1 was given\n        nb_threads = texture_paths.size();\n\n    // Creating a state to keep the data that the threads need alive\n    std::shared_ptr<TextureLoadingThreadState> texture_threads_state = std::make_shared<TextureLoadingThreadState>();\n    texture_threads_state->scene_filepath = scene_path;\n    texture_threads_state->texture_paths = texture_paths;\n    texture_threads_state->material_indices = material_indices;\n\n    
ThreadManager::set_thread_data(ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY, texture_threads_state);\n\n    for (int i = 0; i < nb_threads; i++)\n        ThreadManager::start_thread(ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY, ThreadFunctions::load_scene_texture, std::ref(parsed_scene), texture_threads_state->scene_filepath, std::ref(texture_threads_state->texture_paths), std::ref(texture_threads_state->material_indices), i, nb_threads);\n}\n\nvoid SceneParser::read_material_properties(aiMaterial* mesh_material, CPUMaterial& renderer_material)\n{\n    // Getting the properties that are going to be used by the materials\n    // of the application\n\n    aiReturn error_code_emissive;\n    mesh_material->Get(AI_MATKEY_COLOR_DIFFUSE, *((aiColor3D*)&renderer_material.base_color));\n    if (renderer_material.emission_texture_index == MaterialConstants::NO_TEXTURE)\n    {\n        ColorRGB32F emission;\n        mesh_material->Get(AI_MATKEY_COLOR_EMISSIVE, *((aiColor3D*)&emission));\n\n        renderer_material.emission = emission;\n    }\n\n    mesh_material->Get(AI_MATKEY_EMISSIVE_INTENSITY, renderer_material.emission_strength);\n\n    mesh_material->Get(AI_MATKEY_METALLIC_FACTOR, renderer_material.metallic);\n    mesh_material->Get(AI_MATKEY_ROUGHNESS_FACTOR, renderer_material.roughness);\n    mesh_material->Get(AI_MATKEY_ANISOTROPY_FACTOR, renderer_material.anisotropy);\n    if (!mesh_material->Get(AI_MATKEY_SHEEN_COLOR_FACTOR, *((aiColor3D*)&renderer_material.sheen_color)))\n    {\n        // We did get sheen color from the parsed scene, also trying the roughness\n        mesh_material->Get(AI_MATKEY_SHEEN_ROUGHNESS_FACTOR, renderer_material.sheen_roughness);\n        // Setting the sheen factor to the maximum because we can't really do better than\n        // that with ASSIMP\n        renderer_material.sheen = 1.0f;\n    }\n    if (!mesh_material->Get(AI_MATKEY_SPECULAR_FACTOR, renderer_material.specular))\n    {\n        // We successfully got the specular 
color so we're going to assume that we the specular and tint are 100%\n        renderer_material.specular_tint = 1.0f;\n        renderer_material.specular_color = ColorRGB32F(1.0f);\n    }\n\n    mesh_material->Get(AI_MATKEY_CLEARCOAT_FACTOR, renderer_material.coat);\n    mesh_material->Get(AI_MATKEY_CLEARCOAT_ROUGHNESS_FACTOR, renderer_material.coat_roughness);\n    mesh_material->Get(AI_MATKEY_REFRACTI, renderer_material.ior);\n    mesh_material->Get(AI_MATKEY_TRANSMISSION_FACTOR, renderer_material.specular_transmission);\n    mesh_material->Get(AI_MATKEY_VOLUME_ATTENUATION_COLOR, renderer_material.absorption_color);\n    mesh_material->Get(AI_MATKEY_VOLUME_ATTENUATION_DISTANCE, renderer_material.absorption_at_distance);\n    mesh_material->Get(AI_MATKEY_OPACITY, renderer_material.alpha_opacity);\n\n    /*renderer_material.metallic = 1.0f;\n    renderer_material.roughness = 0.0f;*/\n\n    renderer_material.make_safe();\n}\n\nstd::vector<std::pair<aiTextureType, std::string>> SceneParser::get_textures_paths_and_indices(aiMaterial* mesh_material, ParsedMaterialTextureIndices& texture_indices)\n{\n    std::vector<std::pair<aiTextureType, std::string>> texture_paths;\n\n    texture_indices.base_color_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_BASE_COLOR, texture_paths);\n    if (texture_indices.base_color_texture_index == MaterialConstants::NO_TEXTURE)\n        // Trying diffuse for some file formats\n        // The OBJ format uses DIFFUSE instead of BASE_COLOR\n        texture_indices.base_color_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_DIFFUSE, texture_paths);\n    texture_indices.emission_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_EMISSIVE, texture_paths);\n\n    int roughness_index = get_first_texture_of_type(mesh_material, aiTextureType_DIFFUSE_ROUGHNESS, texture_paths);\n    int metallic_index = get_first_texture_of_type(mesh_material, aiTextureType_METALNESS, texture_paths);\n 
   if (roughness_index != MaterialConstants::NO_TEXTURE && metallic_index != MaterialConstants::NO_TEXTURE && texture_paths[roughness_index].second == texture_paths[metallic_index].second)\n    {\n        // The roughness and metallic textures are the same\n\n        // Poping the metallic path because it's the same as the roughness, we only want one\n        // otherwise the texture reader is going to read the same path (and the same texture) twice from disk\n        texture_paths.pop_back();\n        // Using the roughness index for the roughness + metallic texture\n        texture_indices.roughness_metallic_texture_index = roughness_index;\n    }\n    else\n    {\n        texture_indices.roughness_texture_index = roughness_index;\n        texture_indices.metallic_texture_index = metallic_index;\n    }\n\n    texture_indices.specular_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_SPECULAR, texture_paths);\n    texture_indices.coat_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_CLEARCOAT, texture_paths);\n    texture_indices.sheen_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_SHEEN, texture_paths);\n    texture_indices.specular_transmission_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_TRANSMISSION, texture_paths);\n\n    texture_indices.normal_map_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_NORMALS, texture_paths);\n    if (texture_indices.normal_map_texture_index == MaterialConstants::NO_TEXTURE)\n        // Trying HEIGHT for the normal map for some file formats\n        texture_indices.normal_map_texture_index = get_first_texture_of_type(mesh_material, aiTextureType_HEIGHT, texture_paths);\n\n    if (texture_indices.normal_map_texture_index != MaterialConstants::NO_TEXTURE &&\n        texture_indices.base_color_texture_index != MaterialConstants::NO_TEXTURE &&\n        texture_paths[texture_indices.base_color_texture_index].second == 
texture_paths[texture_indices.normal_map_texture_index].second)\n    {\n        // Some scenes exported from Blender (or any other 3D software really)\n        // can sometimes use their own base color texture as\n        // some kind of bump map.\n        //\n        // This is not supported by this renderer so we're just not going to use normal mapping for\n        // this object\n        texture_indices.normal_map_texture_index = MaterialConstants::NO_TEXTURE;\n        // Popping the texture so that we don't load it\n        texture_paths.pop_back();\n\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Material \\\"%s\\\" uses its base color texture as a normal map (texture is: %s); This is not supported and normal mapping will be disabled for this material.\", mesh_material->GetName().C_Str(), texture_paths[texture_indices.base_color_texture_index].second.c_str());\n    }\n\n    return texture_paths;\n}\n\nint SceneParser::get_first_texture_of_type(aiMaterial* mesh_material, aiTextureType type, std::vector<std::pair<aiTextureType, std::string>>& texture_path_list)\n{\n    int tex_count = mesh_material->GetTextureCount(type);\n    if (tex_count == 0)\n        return MaterialConstants::NO_TEXTURE;\n    else\n    {\n        aiString aiPath;\n        mesh_material->GetTexture(type, 0, &aiPath);\n\n        std::string string_path = std::string(aiPath.data);\n        if (string_path.empty())\n            return MaterialConstants::NO_TEXTURE;\n\n        texture_path_list.push_back(std::make_pair(type, string_path));\n\n        return texture_path_list.size() - 1;\n    }\n}\n\nstd::vector<std::pair<aiTextureType, std::string>> SceneParser::normalize_texture_paths(std::vector<std::pair<aiTextureType, std::string>>& paths)\n{\n    std::vector<std::pair<aiTextureType, std::string>> normalized_paths;\n    normalized_paths.reserve(paths.size());\n\n    for (auto pair : paths)\n    {\n        size_t find_index = pair.second.find(\"%20\");\n        
while (find_index != (size_t)-1)\n        {\n            pair.second = pair.second.replace(find_index, 3, \" \");\n            find_index = pair.second.find(\"%20\");\n        }\n\n        normalized_paths.push_back(pair);\n    }\n\n    return normalized_paths;\n}\n\nCPUMaterial SceneParser::offset_textures_indices(const CPUMaterial& renderer_material, int offset)\n{\n    CPUMaterial out_mat = renderer_material;\n\n   out_mat.emission_texture_index += (renderer_material.emission_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n   out_mat.base_color_texture_index += (renderer_material.base_color_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n\n   out_mat.roughness_texture_index += (renderer_material.roughness_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n\n   out_mat.metallic_texture_index += (renderer_material.metallic_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n   out_mat.anisotropic_texture_index += (renderer_material.anisotropic_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n\n   out_mat.specular_texture_index += (renderer_material.specular_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n   out_mat.coat_texture_index += (renderer_material.coat_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n   out_mat.sheen_texture_index += (renderer_material.sheen_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n   out_mat.specular_transmission_texture_index += (renderer_material.specular_transmission_texture_index == MaterialConstants::NO_TEXTURE) ? 0 : offset;\n\n   return out_mat;\n}\n"
  },
  {
    "path": "src/Scene/SceneParser.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef SCENE_PARSER_H\n#define SCENE_PARSER_H\n\n#include \"assimp/Importer.hpp\"\n#include \"assimp/scene.h\"\n#include \"assimp/postprocess.h\"\n\n#include \"HostDeviceCommon/Material/MaterialCPU.h\"\n#include \"HostDeviceCommon/Material/MaterialUtils.h\"\n#include \"Image/Image.h\"\n#include \"Scene/BoundingBox.h\"\n#include \"Scene/Camera.h\"\n#include \"Renderer/Sphere.h\"\n#include \"Renderer/Triangle.h\"\n#include \"Utils/Utils.h\"\n\n#include <filesystem>\n#include <thread>\n#include <vector>\n\n/**\n * Structure that holds the indices of the textures of a material during scene parsing\n */\nstruct ParsedMaterialTextureIndices\n{\n    int base_color_texture_index = MaterialConstants::NO_TEXTURE;\n    int emission_texture_index = MaterialConstants::NO_TEXTURE;\n\n    int roughness_texture_index = MaterialConstants::NO_TEXTURE;\n    int metallic_texture_index = MaterialConstants::NO_TEXTURE;\n    int roughness_metallic_texture_index = MaterialConstants::NO_TEXTURE;\n\n    int specular_texture_index = MaterialConstants::NO_TEXTURE;\n    int coat_texture_index = MaterialConstants::NO_TEXTURE;\n    int sheen_texture_index = MaterialConstants::NO_TEXTURE;\n    int specular_transmission_texture_index = MaterialConstants::NO_TEXTURE;\n\n    int normal_map_texture_index = MaterialConstants::NO_TEXTURE;\n};\n\nstruct SceneParserOptions\n{\n    SceneParserOptions() {}\n\n    /**\n     * The scene filepath passed as argument is analyzed and it is \n     * determined whether that scene is on an SSD or an HDD\n     * \n     * If the file is on an SSD, the number of texture loading threads will be\n     * adjusted higher to keep the CPU busy (because on an SSD, we're probably CPU bound)\n     * \n     * If we're on an HDD, the number of threads is adjusted lower not to overwhelm the HDD\n     * (because too many threads loading 
different textures in parallel on a HDD is likely to\n     * turn everything into random reads which destroys the read performance of an HDD. SSDs too but\n     * it's not as bad)\n     */\n    SceneParserOptions(const std::string& scene_filepath)\n    {\n        std::filesystem::path true_filepath = scene_filepath;\n        if (std::filesystem::is_symlink(scene_filepath))\n            true_filepath = std::filesystem::read_symlink(scene_filepath);\n\n        if (Utils::is_file_on_ssd(true_filepath.string().c_str())) \n            nb_texture_threads = 16;\n        else\n            nb_texture_threads = 4;\n    }\n\n    float override_aspect_ratio = 16.0f / 9.0f;\n\n    // How many CPU threads to use when loading the textures of the scene.\n    // \n    // Note that blindly defaulting to 1 thread per texture may not be the\n    // best idea, especially on HDDs. This is because with one thread per texture,\n    // all textures will be loading at the same time. Although this may utilize the\n    // CPU very well, this will cause A LOT of random read accesses on the drive\n    // can SIGNIFICANTLY degrade performance. This is mostly applicable to HDDs but\n    // to SSDs too to some extent. 
You may want to use a higher thread count for SSDs\n    // though to be sure to feed enough work to the CPU to keep up with the fast SSD.\n    // \n    // -1 to use one thread per texture.\n    //\n    // 16 seemed to be a good arbitrary number to avoid trashing the disks on my setup \n    // (tested on the Amazon Lumberyard Bistro on both HDD and SSD)\n    int nb_texture_threads = 4;\n};\n\nstruct SceneMetadata\n{\n    // The material names are used for displaying in the material editor of ImGui\n    std::vector<std::string> material_names;\n    // Names of the objects in the scene\n    std::vector<std::string> mesh_names;\n    // For a given mesh index, its material index\n    std::vector<int> mesh_material_indices;\n\n    // AABBs of the meshes of the scene\n    std::vector<BoundingBox> mesh_bounding_boxes;\n\n    // AABB of the whole scene\n    BoundingBox scene_bounding_box;\n};\n\nstruct Scene\n{\n    SceneMetadata metadata;\n\n    std::vector<CPUMaterial> materials;\n    // Material textures. Needs to be index by a material index. 
\n    std::vector<Image8Bit> textures;\n\n    std::vector<int> triangles_vertex_indices;\n    std::vector<float3> vertices_positions;\n    std::vector<unsigned char> has_vertex_normals;\n    std::vector<float3> vertex_normals;\n    std::vector<float2> texcoords;\n    std::vector<float> triangle_areas;\n    // Vertex A, edges AB and AC of the emissive triangles of the scene\n    /*std::vector<float3> triangle_A;\n    std::vector<float3> triangle_AB;\n    std::vector<float3> triangle_AC;*/\n\n\t// Contains the primitive indices of all the emissives triangles that will be used for light sampling.\n\t// Does not contain the emissive triangles that have emissive textures because those are not light sampled\n    // at the moment.\n    std::vector<int> emissive_triangles_primitive_indices;\n    // Contains the primitive indices of all the emissives triangles that will be used for light sampling.\n\t// The difference with 'emissive_triangle_primitive_indices_for_light_sampling' at the moment is that this one\n\t// contains the indices of the emissive triangles that have emissive textures.\n    std::vector<int> emissive_triangles_primitive_indices_and_emissive_textures;\n    std::vector<int> emissive_triangle_vertex_indices;\n\n    std::vector<int> material_indices;\n    std::vector<bool> material_has_opaque_base_color_texture;\n\n    bool has_camera = false;\n    Camera camera;\n\n    Sphere add_sphere(const float3& center, float radius, const CPUMaterial& material, int primitive_index)\n    {\n        int material_index = materials.size();\n\n        materials.push_back(material);\n        material_indices.push_back(material_index);\n\n        Sphere sphere(center, radius, primitive_index);\n\n        return sphere;\n    }\n\n    std::vector<Triangle> get_triangles(const std::vector<int> triangle_indices_to_get)\n    {\n        std::vector<Triangle> triangles;\n\t\ttriangles.reserve(triangle_indices_to_get.size() / 3);\n\n        for (int i = 0; i < 
triangle_indices_to_get.size(); i += 3)\n        {\n            triangles.push_back(Triangle(*reinterpret_cast<float3*>(&vertices_positions[triangle_indices_to_get[i + 0]]),\n                                         *reinterpret_cast<float3*>(&vertices_positions[triangle_indices_to_get[i + 1]]),\n                                         *reinterpret_cast<float3*>(&vertices_positions[triangle_indices_to_get[i + 2]])));\n        }\n\n        return triangles;\n    }\n};\n\nclass SceneParser\n{\npublic:\n    /**\n     * Parses the scene file at @filepath and stores the parsed data in the parsed_scene parameter.\n     * All formats supported by the ASSIMP library are supported by the renderer.\n     * \n     * If provided, the @frame_aspect_override parameter in the options structure is meant to override \n     * the aspect ratio of the camera of the scene file (if any). This is useful because the renderer\n     * uses a default aspect ratio of 16:9 but the camera of the scene file may not use the same aspect. \n     * Without this parameter, this would result in rendering the scene with an aspect different of 16:9 in the default \n     * framebuffer of the renderer which is 16:9, resulting in deformations.\n     */\n    static void parse_scene_file(std::string filepath, Assimp::Importer& assimp_importer, Scene& parsed_scene, SceneParserOptions& options);\n\nprivate:\n\n    static void parse_camera(const aiScene* scene, Scene& parsed_scene, float frame_aspect_override);\n\n    /** \n     * Prepares all the necessary data for multithreaded texture-loading\n     * \n     * @ scene is the scene to parse the textures from\n     * @ textures_paths is a list of pair of <texture_type -> the path to the texture>.\n     * @ material_texture_indices is a list that is as long as there are unique materials\n     *      in the scene. Each field of the stucture contains the index of the texture used\n     *      by that material. 
-1 if the material doesn't have that type of texture\n     *      (if structure.base_color_texture_index == MaterialConstants::NO_TEXTURE for example, that means\n     *      that the material doesn't have a base color texture)\n     * @ material_indices is a vector which is 'number of textures' long and contains the\n     *      index of the material that the texture belongs to.\n     *      If material_indices[3] == 2, this means that the texture 3 (the fourth texure)\n     *      is used by material 2 (which is the third material)\n     * @ texture_per_mesh is a list that is 'number of mesh' long and that gives the number\n     *      of textures used per mesh\n     * @ texture_indices_offset /By how much to offset the indices of the textures used by a material.\n     *      For example, if there are 5 materials in the scene that all use a different base color\n     *      texture, after the call to prepare_textures(), they will all have 0 as the index of their\n     *      base color texture. This is obviously wrong and it should be 0, 1, 2, 3, 4 for\n     *      each material since they use their own texture. 
This is what this vector is for, it contains\n     *      the offsets that are going to be used so that each material has proper texture indices.\n     * @ texture_count How many texture are in the scene\n     */\n    static void prepare_textures(const aiScene* scene, std::vector<std::pair<aiTextureType, std::string>>& texture_paths, std::vector<ParsedMaterialTextureIndices>& material_texture_indices, std::vector<int>& material_indices, std::vector<int>& texture_per_mesh, std::vector<int>& texture_indices_offsets, int& texture_count);\n    static void assign_material_texture_indices(std::vector<CPUMaterial>& materials, const std::vector<ParsedMaterialTextureIndices>& material_tex_indices, const std::vector<int>& material_textures_offsets);\n    static void dispatch_texture_loading(Scene& parsed_scene, const std::string& scene_path, int nb_threads, const std::vector<std::pair<aiTextureType, std::string>>& texture_paths, const std::vector<int>& material_indices);\n\n    static void read_material_properties(aiMaterial* mesh_material, CPUMaterial& renderer_material);\n    /**\n     * Check if the mesh material has a texture of the given type. If so, returns the index of the\n     * texture within texturePathList and appends the path of the texture to the list. If the material\n     * doesn't have the required texture, returns -1\n     */\n    static int get_first_texture_of_type(aiMaterial* mesh_material, aiTextureType type, std::vector<std::pair<aiTextureType, std::string>>& texture_path_list);\n    static std::vector<std::pair<aiTextureType, std::string>> get_textures_paths_and_indices(aiMaterial* mesh_material, ParsedMaterialTextureIndices& texture_indices);\n    static std::vector<std::pair<aiTextureType, std::string>> normalize_texture_paths(std::vector<std::pair<aiTextureType, std::string>>& paths);\n    static CPUMaterial offset_textures_indices(const CPUMaterial& renderer_material, int offset);\n};\n\n#endif\n"
  },
  {
    "path": "src/Shaders/albedo_display.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n #version 430\n\nuniform sampler2D u_texture;\nuniform int u_resolution_scaling;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\n\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\n\tivec2 dims = textureSize(u_texture, 0);\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\tuvec4 ucolor = uvec4(texelFetch(u_texture, thread_id / u_resolution_scaling, 0) * 255);\n\timageStore(u_output_image, thread_id, ucolor);\n#else\n\tout_color = texture(u_texture, vs_tex_coords / u_resolution_scaling);\n#endif\n};"
  },
  {
    "path": "src/Shaders/blend_2_display.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n#version 430\n\n// 0.0f gives 100% of texture 1\n// 1.0f gives 100% of texture_2\nuniform float u_blend_factor;\n\nuniform sampler2D u_texture_1;\nuniform sampler2D u_texture_2;\n\n// How many samples to scale texture 1 and 2 by\nuniform int u_sample_number_1;\nuniform int u_sample_number_2;\n\nuniform int u_resolution_scaling;\n\nuniform float u_gamma;\nuniform float u_exposure;\nuniform int u_do_tonemapping;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\n\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 dims = textureSize(u_texture_1, 0);\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\tvec4 hdr_color_1 = texelFetch(u_texture_1, thread_id / u_resolution_scaling, 0);\n\tvec4 hdr_color_2 = texelFetch(u_texture_2, thread_id / u_resolution_scaling, 0);\n#else\n\tvec4 hdr_color_1 = texture(u_texture_1, vs_tex_coords / u_resolution_scaling);\n\tvec4 hdr_color_2 = texture(u_texture_2, vs_tex_coords / u_resolution_scaling);\n#endif\n\n\tvec4 final_color_1 = hdr_color_1;\n\tvec4 final_color_2 = hdr_color_2;\n\n\t// Scaling by sample count\n\tfinal_color_1 = final_color_1 / float(max(1, u_sample_number_1));\n\tfinal_color_2 = final_color_2 / float(max(1, u_sample_number_2));\n\t\t\n\tif (u_do_tonemapping == 1)\n\t{\n\t\tvec4 tone_mapped_1 = 1.0f - exp(-final_color_1 * u_exposure);\n\t\tvec4 tone_mapped_2 = 1.0f - exp(-final_color_2 * u_exposure);\n\n\t\tfinal_color_1 = pow(tone_mapped_1, 
vec4(1.0f / u_gamma));\n\t\tfinal_color_2 = pow(tone_mapped_2, vec4(1.0f / u_gamma));\n\t}\n\n\tfinal_color_1 = vec4(final_color_1.rgb, 1.0f);\n\tfinal_color_2 = vec4(final_color_2.rgb, 1.0f);\n\n\tvec4 blended_color = final_color_1 * (1.0f - u_blend_factor) + final_color_2 * u_blend_factor;\n#ifdef COMPUTE_SCREENSHOTER\n\tuvec4 ublended_color = uvec4(blended_color * 255);\n\timageStore(u_output_image, thread_id, ublended_color);\n#else\n\tout_color = blended_color;\n#endif // COMPUTE_SCREENSHOTER\n};\n"
  },
  {
    "path": "src/Shaders/boolmap_int.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n #version 430\n\n// This is a 'scalar' texture, containing data only in the red channel\n// In this shader, it represents the sample count per pixel\nuniform isampler2D u_texture;\nuniform int u_resolution_scaling;\nuniform float u_threshold_val;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 dims = textureSize(u_texture, 0);\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\t// We're using abs() here because the sampling count can be negative if \n\t// the pixel isn't being sampled anymore (it has converged and has been \n\t// excluded by the adaptive sampling)\n\tfloat scalar = texelFetch(u_texture, thread_id / u_resolution_scaling, 0).r;\n#else\n\tfloat scalar = texture(u_texture, vs_tex_coords / u_resolution_scaling).r;\n#endif\n\t\n\tfloat final_color = 0.0f;\n\tif (scalar < u_threshold_val && scalar != -1.0f)\n\t\t// If the value of the scalar is -1, this is a special value which is used\n\t\t// by the adaptive sampling to indicate that a pixel has not converged yet.\n\t\t// If the pixel has not converged yet, then it must have the \"hotter\" color\n\t\t// which the color of the u_max_val\n\t\t//\n\t\t// We only set the pixel white if it has a value lower than the threshold\n\t\t// number. 
The threshold is the current sample count so if the pixel has a\n\t\t// sample count lower than that, that means that it has converged\n\t\tfinal_color = 1.0f;\n\n#ifdef COMPUTE_SCREENSHOTER\n\tuvec4 ufinal_color = uvec4(uvec3(final_color) * 255.0f, 255);\n\timageStore(u_output_image, thread_id, ufinal_color);\n#else\n\tout_color = vec4(vec3(final_color), 1.0f);\n#endif // COMPUTE_SCREENSHOTER\n};\n"
  },
  {
    "path": "src/Shaders/default_display.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n#version 430\n\nuniform sampler2D u_texture;\nuniform int u_sample_number;\nuniform int u_resolution_scaling;\n\nuniform float u_gamma;\nuniform float u_exposure;\nuniform int u_do_tonemapping;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\n\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 dims = textureSize(u_texture, 0);\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\tvec4 hdr_color = texelFetch(u_texture, thread_id / u_resolution_scaling, 0);\n#else\n\tvec4 hdr_color = texture(u_texture, vs_tex_coords / u_resolution_scaling);\n#endif\n\n\tvec4 final_color = hdr_color;\n\t// Scaling by sample count\n\tfinal_color = final_color / float(u_sample_number);\n\tfinal_color = clamp(final_color, 0.0f, 1.0e35f);\n\t\t\n\tif (u_do_tonemapping == 1)\n\t{\n\t\tvec4 tone_mapped = 1.0f - exp(-final_color * u_exposure);\n\t\tfinal_color = pow(tone_mapped, vec4(1.0f / u_gamma));\n\t}\n\n\tfinal_color = vec4(final_color.rgb, 1.0f);\n\n#ifdef COMPUTE_SCREENSHOTER\n\timageStore(u_output_image, thread_id, uvec4(final_color * 255));\n#else\n\tout_color = final_color;\n#endif // COMPUTE_SCREENSHOTER\n};\n"
  },
  {
    "path": "src/Shaders/fullscreen_quad.vert",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n #version 330\n\nout vec2 vs_tex_coords;\n\nvoid main()\n{\n\tvec2 triangle_vertices[6] = vec2[6](vec2(-1, -1), vec2(1, -1), vec2(-1, 1), vec2(1, -1), vec2(1, 1), vec2(-1, 1));\n\tvec2 triangle_tex_coords[6] = vec2[6](vec2(0, 0), vec2(1, 0), vec2(0, 1), vec2(1, 0), vec2(1, 1), vec2(0, 1));\n\n\tgl_Position = vec4(triangle_vertices[gl_VertexID], 1, 1);\n\tvs_tex_coords = triangle_tex_coords[gl_VertexID];\n};\n"
  },
  {
    "path": "src/Shaders/heatmap_int.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n #version 430\n\n// This is a 'scalar' texture, containing data only in the red channel\n// In this shader, it represents the sample count per pixel\nuniform isampler2D u_texture;\nuniform int u_resolution_scaling;\n\n// This shader supports up to 16 color stops. This doesn't mean that\n// the user has to provide 16 stops. The user only provides X stops as\n// indicated by u_nb_stops\nuniform vec3 u_color_stops[16];\nuniform int u_nb_stops;\n\nuniform float u_min_val;\nuniform float u_max_val;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 dims = textureSize(u_texture, 0);\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\t// We're using abs() here because the sampling count can be negative if \n\t// the pixel isn't being sampled anymore (it has converged and has been \n\t// excluded by the adaptive sampling)\n\tfloat scalar = texelFetch(u_texture, thread_id / u_resolution_scaling, 0).r;\n#else\n\tfloat scalar = texture(u_texture, vs_tex_coords / u_resolution_scaling).r;\n#endif\n\t\n\tif (u_min_val == u_max_val)\n\t{\n\t\t// If the bounds are the same, arbitrarily choosing\n\t\t// to color with the last color stop\n#ifdef COMPUTE_SCREENSHOTER\n\tuvec4 output_color = uvec4(uvec3(u_color_stops[u_nb_stops - 1] * 255), 255);\n\timageStore(u_output_image, thread_id, output_color);\n#else\n\tout_color = vec4(u_color_stops[u_nb_stops - 1], 
1.0f);\n#endif\n\n\t\treturn;\n\t}\n\n\tif (scalar == -1.0f)\n\t\t// If the value of the scalar is -1, this is a special value which is used\n\t\t// by the adaptive sampling to indicate that a pixel has not converged yet.\n\t\t// If the pixel has not converged yet, then it must have the \"hotter\" color\n\t\t// which the color of the u_max_val\n\t\tscalar = u_max_val;\n\n\tscalar = clamp(scalar, u_min_val, u_max_val);\n\t// Brings scalar between 0 and 1 relative to u_min_val and u_max_val\n\tfloat normalized = (scalar - u_min_val) / (u_max_val - u_min_val);\n\n\t// This indicates the stop to use but this is a float so it could be 1.5 for example\n\t// which would mean that we would have to pick 50% of u_color_stops[1] + 50% of u_color_stops[2]\n\tfloat stop = normalized * (u_nb_stops - 1);\n\n\tint low_stop = int(floor(stop));\n\tint high_stop = int(ceil(stop));\n\tfloat fraction_of_high_stop = stop - low_stop;\n\n\t// Lerping between the 2 stops\n\t// Example:\n\t// For a 'stop' value of 2.37, we get low_stop = 2, high_stop = 3\n\t// and fraction_of_high_stop = 0.37\n\t// We're closer to stop 2 than stop 3 so we should interpolate more of stop 2 than stop 3\n\t// This gives us \n\t//\n\t// out_color = stop2 * (1.0f - 0.37) + stop3 * 0.37 \n\t//\n\t// which is more of stop2 than stop3\n\tvec4 final_color = vec4(mix(u_color_stops[low_stop], u_color_stops[high_stop], fraction_of_high_stop), 1.0f);\n#ifdef COMPUTE_SCREENSHOTER\n\tuvec4 ufinal_color = uvec4(final_color * 255.0f);\n\timageStore(u_output_image, thread_id, ufinal_color);\n#else\n\tout_color = final_color;\n#endif // COMPUTE_SCREENSHOTER\n};\n"
  },
  {
    "path": "src/Shaders/normal_display.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n #version 430\n\nuniform sampler2D u_texture;\nuniform int u_resolution_scaling;\n\nuniform float u_gamma;\nuniform float u_exposure;\nuniform int u_do_tonemapping;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\n\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\n\tivec2 dims = textureSize(u_texture, 0);\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\tvec4 hdr_color = texelFetch(u_texture, thread_id / u_resolution_scaling, 0);\n#else\n\tvec4 hdr_color = texture(u_texture, vs_tex_coords / u_resolution_scaling);\n#endif\n\n\tvec4 final_color = hdr_color;\n\t// Remapping normals for displaying\n\tfinal_color = (final_color + 1.0f) * 0.5f;\n\t\t\n\tif (u_do_tonemapping == 1)\n\t{\n\t\tvec4 tone_mapped = 1.0f - exp(-final_color * u_exposure);\n\t\tfinal_color = pow(tone_mapped, vec4(1.0f / u_gamma));\n\t}\n\n\tfinal_color = vec4(final_color.rgb, 1.0f);\n\n#ifdef COMPUTE_SCREENSHOTER\n\tuvec4 ufinal_color = uvec4(final_color * 255);\n\timageStore(u_output_image, thread_id, ufinal_color);\n#else\n\tout_color = final_color;\n#endif\n};"
  },
  {
    "path": "src/Shaders/white_furnace_threshold.frag",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n \n\n/**\n * This shader is meant to be used with a white furnace scene and\n * it will highlight in green and red pixel values that are below\n * or above 0.5f respectively. \n * 0.5f is the correct assumed value for a furnace test on a material\n * with 1.0f albedo and 0.5f ambient lighting\n */\n#version 430\n\nuniform sampler2D u_texture;\nuniform int u_sample_number;\nuniform int u_resolution_scaling;\n\nuniform float u_gamma;\nuniform float u_exposure;\nuniform int u_do_tonemapping;\n\n// If true, the shader will display\n// pixel that lose energy as green. Pixels will not be highlighted\n// if false\nuniform bool u_use_low_threshold;\n// If true, the shader will display\n// pixel that gain energy as red. Pixels will not be highlighted\n// if false\nuniform bool u_use_high_threshold;\n\n#ifdef COMPUTE_SCREENSHOTER\nuniform layout(binding = 2, rgba8ui) writeonly uimage2D u_output_image;\n#else\nin vec2 vs_tex_coords;\nout vec4 out_color;\n#endif // COMPUTE_SCREENSHOTER\n\n#ifdef COMPUTE_SCREENSHOTER\nlayout(local_size_x = 8, local_size_y = 8) in;\n#endif // COMPUTE_SCREENSHOTER\n\nvoid main()\n{\n#ifdef COMPUTE_SCREENSHOTER\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 dims = textureSize(u_texture, 0);\t\t\t\t\t\t\t\t\t\t\t\t\t\n\tivec2 thread_id = ivec2(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y);\t\t\t\t\n\tif (thread_id.x >= dims.x || thread_id.y >= dims.y)\t\t\t\t\t\t\t\n\t\treturn;\n\n\tvec4 hdr_color = texelFetch(u_texture, thread_id / u_resolution_scaling, 0);\n#else\n\tvec4 hdr_color = texture(u_texture, vs_tex_coords / u_resolution_scaling);\n#endif\n\n\tvec4 final_color = hdr_color;\n\t// Scaling by sample count\n\tfinal_color = final_color / float(u_sample_number);\n\t\n\tif ((final_color.r > 0.505f || final_color.g > 0.505f || final_color.b > 0.505f) && u_use_high_threshold)\n\t\tfinal_color = 
vec4(1.0f, 0.0f, 0.0f, 1.0f);\n\telse if ((final_color.r < 0.495f || final_color.g < 0.495f || final_color.b < 0.495f) && u_use_low_threshold)\n\t\tfinal_color = vec4(0.0f, 1.0f, 0.0f, 1.0f);\n\t\t\n\tif (u_do_tonemapping == 1)\n\t{\n\t\tvec4 tone_mapped = 1.0f - exp(-final_color * u_exposure);\n\t\tfinal_color = pow(tone_mapped, vec4(1.0f / u_gamma));\n\t}\n\n\tfinal_color = vec4(final_color.rgb, 1.0f);\n\n#ifdef COMPUTE_SCREENSHOTER\n\timageStore(u_output_image, thread_id, uvec4(final_color * 255));\n#else\n\tout_color = final_color;\n#endif // COMPUTE_SCREENSHOTER\n};\n"
  },
  {
    "path": "src/Threads/ThreadFunctions.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Image/Image.h\"\n#include \"Compiler/GPUKernel.h\"\n#include \"Threads/ThreadFunctions.h\"\n\n // For replacing backslashes in texture paths\n#include <regex>\n\nvoid ThreadFunctions::compile_kernel(std::shared_ptr<GPUKernel> kernel, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n    kernel->compile(hiprt_orochi_ctx, func_name_sets, true, false);\n}\n\nvoid ThreadFunctions::compile_kernel_silent(std::shared_ptr<GPUKernel> kernel, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n    kernel->compile(hiprt_orochi_ctx, func_name_sets, true, true);\n}\n\nvoid ThreadFunctions::precompile_kernel(const std::string& kernel_function_name, const std::string& kernel_filepath, GPUKernelCompilerOptions options, std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets)\n{\n    OROCHI_CHECK_ERROR(oroCtxSetCurrent(hiprt_orochi_ctx->orochi_ctx));\n\n    GPUKernel kernel(kernel_filepath, kernel_function_name);\n    kernel.set_precompiled(true);\n    kernel.get_kernel_options() = options;\n    kernel.compile(hiprt_orochi_ctx, func_name_sets, true, true);\n}\n\nvoid ThreadFunctions::load_scene_texture(Scene& parsed_scene, std::string scene_path, const std::vector<std::pair<aiTextureType, std::string>>& tex_paths, const std::vector<int>& material_indices, int thread_index, int nb_threads)\n{\n    // Preparing the scene_filepath so that it's ready to be appended with the texture name\n    std::string corrected_filepath;\n    // Starting with the .GLTF/.OBJ/.whatever-scene-format file\n    corrected_filepath = scene_path;\n    // Removing the name of the .GLTF / .OBJ / .XXX file by looking at the *last* '/' or '\\'\n    if (corrected_filepath.find('/') != 
std::string::npos)\n        corrected_filepath = corrected_filepath.substr(0, corrected_filepath.rfind('/') + 1);\n    else if (corrected_filepath.find('\\\\') != std::string::npos)\n        corrected_filepath = corrected_filepath.substr(0, corrected_filepath.rfind('\\\\') + 1);\n    // Converting the path to absolute\n    corrected_filepath = std::filesystem::absolute(corrected_filepath).string();\n    // Replacing backslashes by forward slashes\n    corrected_filepath = std::regex_replace(corrected_filepath, std::regex(\"\\\\\\\\\"), \"/\"); // replace 'def' -> 'klm'\n\n    // While loop here so that a single thread can parse multiple textures\n    while (thread_index < parsed_scene.textures.size())\n    {\n        // Taking the name of the texture\n        std::string texture_file_path = tex_paths[thread_index].second;\n        // Adding the name of the texture to the absolute path of the scene file such that\n        // we're looking for textures next to the GLTF file\n        std::string full_path = corrected_filepath + texture_file_path;\n        aiTextureType type = tex_paths[thread_index].first;\n        int nb_channels;\n\n        switch (type)\n        {\n        case aiTextureType_BASE_COLOR:\n        case aiTextureType_DIFFUSE:\n            // 4 Channels because we may want the alpha for transparency handling\n            nb_channels = 4;\n            break;\n\n        case aiTextureType_NORMALS:\n        case aiTextureType_HEIGHT:\n            // Don't need the alpha\n            // TODO we only need 3 channels here but it's tricky to handle 3 channels texture with HIP/CUDA. 
Supported formats are only 1, 2, 4 channels, not three\n            nb_channels = 4;\n            break;\n\n        case aiTextureType_DIFFUSE_ROUGHNESS:\n            if (parsed_scene.materials[material_indices[thread_index]].roughness_metallic_texture_index != MaterialConstants::NO_TEXTURE)\n            {\n                // This means we have a packed metallic/roughness texture\n                nb_channels = 4;\n\n                break;\n            }\n            else\n            {\n                // Otherwise, we don't have a packed metallic/roughness texture so only 1 channel just for the roughness\n                nb_channels = 1;\n\n                break;\n            }\n\n        case aiTextureType_EMISSIVE:\n            // TODO we only need 3 channels here but it's tricky to handle 3 channels texture with HIP/CUDA. Supported formats are only 1, 2, 4 channels, not three\n            nb_channels = 4;\n            break;\n\n        default:\n            nb_channels = 1;\n            break;\n        }\n\n        Image8Bit texture = Image8Bit::read_image(full_path, nb_channels, false);\n\n        int material_index = material_indices[thread_index];\n        if (type == aiTextureType_EMISSIVE)\n        {\n            if (texture.is_constant_color(/* threshold */ 5))\n            {\n                // The emissive texture is constant color, we can then just not use that texture and use \n                // the emission filed of the material to store the emission of the texture\n                parsed_scene.materials[material_index].emission_texture_index = MaterialConstants::CONSTANT_EMISSIVE_TEXTURE;\n\n                ColorRGBA32F emission_rgba = texture.sample_rgba32f(make_float2(0, 0));\n                parsed_scene.materials[material_index].emission = ColorRGB32F(emission_rgba.r, emission_rgba.g, emission_rgba.b);\n            }\n            else\n                // If not emissive texture special case, we can actually read the texture\n                
parsed_scene.textures[thread_index] = texture;\n        }\n        else\n        {\n            // If not emissive texture special case, we can actually read the texture\n\n            if (type == aiTextureType_DIFFUSE || type == aiTextureType_BASE_COLOR)\n            {\n                // For base color textures, we're going to search for alpha transparency in the texture\n                unsigned char texture_fully_opaque = texture.is_fully_opaque() ? 1 : 0;\n                parsed_scene.material_has_opaque_base_color_texture[material_index] = texture_fully_opaque;\n            }\n            parsed_scene.textures[thread_index] = texture;\n        }\n\n        thread_index += nb_threads;\n    }\n}\n\nvoid ThreadFunctions::load_scene_parse_emissive_triangles(const aiScene* scene, Scene& parsed_scene)\n{\n    int current_emissive_triangle_index = 0;\n\n    // If the scene contains multiple meshes, each mesh will have\n    // its vertices indices starting at 0. We don't want that.\n    // \n    // We want indices to be continuously growing (because we don't want\n    // the second mesh (with indices starting at 0, i.e its own indices) to use\n    // the vertices of the first mesh that have been parsed (and that use indices 0!)\n    // The offset thus offsets the indices of the meshes that come after the first one\n    // to account for all the indices of the previously parsed meshes\n    //\n    // This is only used for the emissives triangles vertex indices\n    int global_indices_offset = 0;\n\n    // Looping over all the meshes\n    for (int mesh_index = 0; mesh_index < scene->mNumMeshes; mesh_index++)\n    {\n        aiMesh* mesh = scene->mMeshes[mesh_index];\n        int material_index = mesh->mMaterialIndex;\n\n        CPUMaterial& renderer_material = parsed_scene.materials[material_index];\n\n        // If the mesh is emissive, we're going to add the indices of its faces to the emissive triangles\n        // of the scene such that the triangles can be 
importance sampled (direct lighting estimation / next-event estimation)\n        //\n        // We are not importance sampling emissive texture so if the mesh has an emissive texture attached, we're\n        // not adding its triangles to the list of emissive triangles\n        bool emissive_texture_used = renderer_material.emission_texture_index != MaterialConstants::NO_TEXTURE && renderer_material.emission_texture_index != MaterialConstants::CONSTANT_EMISSIVE_TEXTURE;\n        bool is_mesh_emissive = renderer_material.is_emissive() || emissive_texture_used;\n\n        int max_emissive_mesh_index_offset = 0;\n        for (int face_index = 0; face_index < mesh->mNumFaces; face_index++, current_emissive_triangle_index++)\n        {\n            int index_1 = mesh->mFaces[face_index].mIndices[0];\n            int index_2 = mesh->mFaces[face_index].mIndices[1];\n            int index_3 = mesh->mFaces[face_index].mIndices[2];\n\n            // Accumulating the maximum index of this mesh, this is to know\n            max_emissive_mesh_index_offset = std::max(max_emissive_mesh_index_offset, std::max(index_1, std::max(index_2, index_3)));\n\n            if (is_mesh_emissive)\n            {\n                if (!emissive_texture_used)\n                    // Pushing the index of the current triangle if we're looping on an emissive mesh\n                    // and if that mesh doesn't have an emissive texture because we're not importance\n                    // sampling emissive textures\n                    parsed_scene.emissive_triangles_primitive_indices.push_back(current_emissive_triangle_index);\n\n                parsed_scene.emissive_triangle_vertex_indices.push_back(index_1 + global_indices_offset);\n                parsed_scene.emissive_triangle_vertex_indices.push_back(index_2 + global_indices_offset);\n                parsed_scene.emissive_triangle_vertex_indices.push_back(index_3 + global_indices_offset);\n                
parsed_scene.emissive_triangles_primitive_indices_and_emissive_textures.push_back(current_emissive_triangle_index);\n            }\n        }\n\n\t\tglobal_indices_offset += max_emissive_mesh_index_offset + 1; // +1 because the indices start at 0 but 0 is already 1 index on its own so we need + 1\n    }\n\n    // Precomputing the edges AB, AC of the emissives triangles\n//    parsed_scene.triangle_A.resize(parsed_scene.emissive_triangle_indices.size());\n//    parsed_scene.triangle_AB.resize(parsed_scene.emissive_triangle_indices.size());\n//    parsed_scene.triangle_AC.resize(parsed_scene.emissive_triangle_indices.size());\n//\n//    // Also pre-computing the edges AB and AC of that triangle for light sampling (for generating a point on the triangle)\n//#pragma omp parallel for\n//    for (int i = 0; i < parsed_scene.emissive_triangle_indices.size(); i++)\n//    {\n//\t\t// Getting the index of the triangle we're currently processing\n//\t\tint current_triangle_index = parsed_scene.emissive_triangle_indices[i];\n//\n//        float3 vertex_A = parsed_scene.vertices_positions[parsed_scene.triangles_indices[current_triangle_index * 3 + 0]];\n//        float3 vertex_B = parsed_scene.vertices_positions[parsed_scene.triangles_indices[current_triangle_index * 3 + 1]];\n//        float3 vertex_C = parsed_scene.vertices_positions[parsed_scene.triangles_indices[current_triangle_index * 3 + 2]];\n//\n//\t\tfloat3 AB = vertex_B - vertex_A;\n//\t\tfloat3 AC = vertex_C - vertex_A;\n//\n//        parsed_scene.triangle_A[i] = vertex_A;\n//        parsed_scene.triangle_AB[i] = AB;\n//        parsed_scene.triangle_AC[i] = AC;\n//    }\n}\n\nvoid ThreadFunctions::load_scene_compute_triangle_areas(Scene& parsed_scene)\n{\n\tint number_of_triangles = parsed_scene.triangles_vertex_indices.size() / 3;\n\n\tparsed_scene.triangle_areas.resize(number_of_triangles);\n\n#pragma omp parallel for\n    for (int triangle_index = 0; triangle_index < number_of_triangles; triangle_index++)\n    
{\n        float3 vertex_A = parsed_scene.vertices_positions[parsed_scene.triangles_vertex_indices[triangle_index * 3 + 0]];\n        float3 vertex_B = parsed_scene.vertices_positions[parsed_scene.triangles_vertex_indices[triangle_index * 3 + 1]];\n        float3 vertex_C = parsed_scene.vertices_positions[parsed_scene.triangles_vertex_indices[triangle_index * 3 + 2]];\n\n        float3 AB = vertex_B - vertex_A;\n        float3 AC = vertex_C - vertex_A;\n\n        float3 normal = hippt::cross(AB, AC);\n        float length_normal = hippt::length(normal);\n        float area = hippt::length(normal) * 0.5f;\n\n        parsed_scene.triangle_areas[triangle_index] = area;\n    }\n}\n\nvoid ThreadFunctions::read_envmap(Image32Bit& hdr_image_out, const std::string& filepath, int wanted_channel_count, bool flip_Y)\n{\n    if (filepath.ends_with(\".hdr\"))\n        hdr_image_out = Image32Bit::read_image_hdr(filepath, wanted_channel_count, flip_Y);\n    else if (filepath.ends_with(\".exr\"))\n        hdr_image_out = Image32Bit::read_image_exr(filepath, flip_Y);\n\n    if (hdr_image_out.width == 0 || hdr_image_out.height == 0)\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_WARNING, \"Could not read envmap file: %s\", filepath.c_str());\n}\n"
  },
  {
    "path": "src/Threads/ThreadFunctions.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef THREAD_FUNCTIONS_H\n#define THREAD_FUNCTIONS_H\n\n#include \"Renderer/GPURenderer.h\"\n\nclass ThreadFunctions\n{\npublic:\n\tstatic void compile_kernel(std::shared_ptr<GPUKernel> kernel, std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets);\n\tstatic void compile_kernel_silent(std::shared_ptr<GPUKernel>, std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets);\n\tstatic void precompile_kernel(const std::string& kernel_function_name, const std::string& kernel_filepath, GPUKernelCompilerOptions options, std::shared_ptr<HIPRTOrochiCtx> hiprt_ctx, const std::vector<hiprtFuncNameSet>& func_name_sets);\n\n\tstatic void load_scene_texture(Scene& parsed_scene, std::string scene_path, const std::vector<std::pair<aiTextureType, std::string>>& tex_paths, const std::vector<int>& material_indices, int thread_index, int nb_threads);\n\n\t/**\n\t * Scans through the emissive meshes of the scene and adds the triangle of those emissive meshes\n\t * to the parsed_scene.emissive_triangles_indices field of the scene\n\t * \n\t * This function all precomputes the AB and AC edges of the triangles of the scenes for light sampling\n\t */\n\tstatic void load_scene_parse_emissive_triangles(const aiScene* scene, Scene& parsed_scene);\n\t/**\n\t * Computes the area of each triangle in the scene and stores it in the triangle_areas buffer\n\t */\n\tstatic void load_scene_compute_triangle_areas(Scene& parsed_scene);\n\n\t/**\n\t * Reads 'wanted_channel_count' channels of a 32 bit HDR image from 'filepath' and stores it in 'hdr_image_out'.\n\t * \n\t * If flip_y is true, the image will be postprocessed such that its origin is in the bottom left corner\n\t * (as used by OpenGL or CUDA for example)\n\t */\n\tstatic void read_envmap(Image32Bit& hdr_image_out, const 
std::string& filepath, int wanted_channel_count, bool flip_Y);\n};\n\n#endif\n"
  },
  {
    "path": "src/Threads/ThreadManager.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Threads/ThreadManager.h\"\n\n#include <deque>\n#include <memory>\n#include <thread>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\nstd::string ThreadManager::COMPILE_RAY_VOLUME_STATE_SIZE_KERNEL_KEY = \"CompileRayVolumeStateSizeKernelKey\";\nstd::string ThreadManager::COMPILE_NEE_PLUS_PLUS_FINALIZE_ACCUMULATION_KERNEL_KEY = \"CompileNeePlusPlusFinalizeAccumulationKernelKey\";\nstd::string ThreadManager::COMPILE_KERNELS_THREAD_KEY = \"CompileKernelPassesKey\";\nstd::string ThreadManager::GPU_RENDERER_PRECOMPILE_KERNELS_THREAD_KEY = \"GPURendererPrecompileKernelsKey\";\n\nstd::string ThreadManager::RENDER_WINDOW_CONSTRUCTOR = \"RenderWindowConstructor\";\nstd::string ThreadManager::RENDER_WINDOW_RENDERER_INITIAL_RESIZE = \"RenderWindowRendererInitialResize\";\n\nstd::string ThreadManager::RENDERER_SET_ENVMAP = \"RendererSetEnvmapKey\";\nstd::string ThreadManager::RENDERER_BUILD_BVH = \"RendererBuildBVH\";\nstd::string ThreadManager::RENDERER_UPLOAD_MATERIALS = \"RendererUploadMaterials\";\nstd::string ThreadManager::RENDERER_UPLOAD_TEXTURES = \"RendererUploadTextures\";\nstd::string ThreadManager::RENDERER_UPLOAD_EMISSIVE_TRIANGLES = \"RendererUploadEmissiveTriangles\";\nstd::string ThreadManager::RENDERER_UPLOAD_TRIANGLE_AREAS = \"RendererUploadTriangleAreas\";\nstd::string ThreadManager::RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE = \"RendererComputeEmissivesPowerAreaAliasTable\";\n\nstd::string ThreadManager::RENDERER_PRECOMPILE_KERNELS = \"RendererPrecompileKernel\";\nstd::string ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS = \"ReSTIRDIPrecompileKernel\";\n\nstd::string ThreadManager::SCENE_TEXTURES_LOADING_THREAD_KEY = \"TextureThreadsKey\";\nstd::string ThreadManager::SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES = \"ParseEmissiveTrianglesKey\";\nstd::string 
ThreadManager::SCENE_LOADING_COMPUTE_TRIANGLE_AREAS = \"ComputeTriangleAreas\";\nstd::string ThreadManager::ENVMAP_LOAD_FROM_DISK_THREAD = \"EnvmapLoadThreadsKey\";\n\nbool ThreadManager::m_monothread = false;\nstd::unordered_map<std::string, std::shared_ptr<void>> ThreadManager::m_threads_states;\nstd::unordered_map<std::string, std::vector<std::thread>> ThreadManager::m_threads_map;\nstd::unordered_map<std::string, std::mutex> ThreadManager::m_join_mutexes;\nstd::unordered_map<std::string, std::unordered_set<std::string>> ThreadManager::m_dependencies;\n"
  },
  {
    "path": "src/Threads/ThreadManager.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef THREAD_MANAGER_H\n#define THREAD_MANAGER_H\n\n#include <deque>\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <thread>\n#include <unordered_map>\n#include <unordered_set>\n#include <vector>\n\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\n// TODO make this class not a singleton but a global variable instead\n\n/**\n * Singleton class so that threads are accessible everywhere to be .join()\n * whenever we want without having to pass them around in function calls etc...\n * \n * This class works by creating threads and storing them in std::vectors.\n * Which std::vector is the thread going to be stored in depends on the key that is given.\n * Keys are basically used to give some kind of \"name\" to threads. The main use for that\n * is that all threads with the same key can be joined at the same time. So for example,\n * if you add 2 threads, both with the key 'MY_THREAD_KEY', they will both be added to\n * the same std::vector (these std::vectors are in the thread_map which is an attribute\n * of this class). 
Then, when you decide to join threads with the 'MY_THREAD_KEY' key,\n * all threads of the corresponding std::vector will be .join()\n */\nclass ThreadManager\n{\npublic:\n\tstatic std::string COMPILE_RAY_VOLUME_STATE_SIZE_KERNEL_KEY;\n\tstatic std::string COMPILE_NEE_PLUS_PLUS_FINALIZE_ACCUMULATION_KERNEL_KEY;\n\tstatic std::string COMPILE_KERNELS_THREAD_KEY;\n\t// Key for the thread that will ** launch ** the threads that will precompile kernels\n\t// in the background (needed because ** launching ** the precompilation itself takes quite a\n\t// bit of time so we're doing that on a thread with this key\n\tstatic std::string GPU_RENDERER_PRECOMPILE_KERNELS_THREAD_KEY;\n\n\tstatic std::string RENDER_WINDOW_CONSTRUCTOR;\n\tstatic std::string RENDER_WINDOW_RENDERER_INITIAL_RESIZE;\n\n\tstatic std::string RENDERER_SET_ENVMAP;\n\tstatic std::string RENDERER_BUILD_BVH;\n\tstatic std::string RENDERER_UPLOAD_MATERIALS;\n\tstatic std::string RENDERER_UPLOAD_TEXTURES;\n\tstatic std::string RENDERER_UPLOAD_EMISSIVE_TRIANGLES;\n\tstatic std::string RENDERER_UPLOAD_TRIANGLE_AREAS;\n\tstatic std::string RENDERER_COMPUTE_EMISSIVES_POWER_ALIAS_TABLE;\n\n\tstatic std::string RENDERER_PRECOMPILE_KERNELS;\n\tstatic std::string RESTIR_DI_PRECOMPILE_KERNELS;\n\n\tstatic std::string SCENE_TEXTURES_LOADING_THREAD_KEY;\n\tstatic std::string SCENE_LOADING_PARSE_EMISSIVE_TRIANGLES;\n\tstatic std::string SCENE_LOADING_COMPUTE_TRIANGLE_AREAS;\n\tstatic std::string ENVMAP_LOAD_FROM_DISK_THREAD;\n\n\t/**\n\t * If the passed parameter is true, the ThreadManager will execute all\n\t * started threads on the main thread instead of on a separate thread.\n\t */\n\tstatic void set_monothread(bool is_monothread)\n\t{\n\t\tm_monothread = is_monothread;\n\t}\n\n\ttemplate <typename T>\n\tstatic void set_thread_data(const std::string& key, std::shared_ptr<T> state)\n\t{\n\t\tm_threads_states[key] = std::static_pointer_cast<void>(state);\n\t}\n\n\ttemplate <class _Fn, class... 
_Args>\n\tstatic void start_thread(std::string key, _Fn function, _Args... args)\n\t{\n\t\tconst std::unordered_set<std::string>& dependencies = m_dependencies[key];\n\t\tif (!dependencies.empty())\n\t\t\tstart_with_dependencies(dependencies, key, function, args...);\n\t\telse\n\t\t{\n\t\t\tif (m_monothread)\n\t\t\t{\n\t\t\t\tstart_serial_thread(key, function, args...);\n\n\t\t\t\t// Creates the entry in the map if it doesn't exist. Doesn't do anything if it already exists.\n\t\t\t\t// This is so that other parts of the ThreadManager don't scream when trying to join threads\n\t\t\t\t// that haven't been started (\"started\" meaning that there is an entry in the map) for example\n\t\t\t\tbool empty = m_threads_map[key].empty();\n\t\t\t}\n\t\t\telse\n\t\t\t\t// Starting the thread and adding it to the list of threads for the given key\n\t\t\t\tm_threads_map[key].push_back(std::thread(function, args...));\n\t\t}\n\t}\n\n\t/**\n\t * This function starts a thread on the main thread i.e. not asynchronously and waits for\n\t * the completion of the given function before returning\n\t */\n\ttemplate <class _Fn, class... _Args>\n\tstatic void start_serial_thread(std::string key, _Fn function, _Args... args)\n\t{\n\t\t// Creating an entry in the map to 'fake' that we've started a thread\n\t\tbool empty = m_threads_map[key].empty();\n\n\t\tfunction(args...);\n\t}\n\n\tstatic void join_threads(const std::string& key)\n\t{\n\t\tstd::lock_guard<std::mutex> lock(m_join_mutexes[key]);\n\n\t\tauto find = m_threads_map.find(key);\n\t\tif (find != m_threads_map.end())\n\t\t{\n\t\t\tif (find->second.empty())\n\t\t\t\t// No threads to wait for\n\t\t\t\treturn;\n\n\t\t\tfor (std::thread& thread : find->second)\n\t\t\t{\n\t\t\t\t// TODO: This is just for debugging. 
\n\t\t\t\t// There seems to be some very rare bug in the ThreadManager where sometimes, \n\t\t\t\t// we're trying to join (with thread.join()) below a thread that has a NULL\n\t\t\t\t// handle from the 'ParseEmissiveTrianglesKey' thread key\n\t\t\t\t//\n\t\t\t\t// UPDATE: This seems to happen when we're calling join_all_threads() while\n\t\t\t\t// we're still starting some thread with dependencies: we end up in a situation where we're\n\t\t\t\t// trying to join on a dependency that has already been joined by join_all_threads() so we're going to need\n\t\t\t\t// some kind of way for join_all_threads() to wait for all threads to at least have started\n\t\t\t\tif (thread.native_handle() == 0)\n\t\t\t\t\tUtils::debugbreak();\n\n\t\t\t\tif (thread.joinable())\n\t\t\t\t\tthread.join();\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Trying to joing threads with key \\\"%s\\\" but no threads have been started with this key.\", key.c_str());\n\n\t\t\treturn;\n\t\t}\n\n\t\tm_threads_map[key].clear();\n\t}\n\n\t/**\n\t * Joins all the threads that have been started so far except the threads\n\t * launched with a key in the 'exceptions' vector passed as parameter\n\t */\n\tstatic void join_all_threads(const std::unordered_set<std::string>& exceptions = {})\n\t{\n\t\t// Joining all the threads and their dependencies\n\t\tfor (const auto& key_to_threads : m_threads_map)\n\t\t{\n\t\t\tstd::deque<std::string> dependencies_to_wait_for;\n\t\t\tstd::deque<std::string> dependencies_to_analyze;\n\n\t\t\tconst std::string& thread_key = key_to_threads.first;\n\t\t\tif (exceptions.find(thread_key) != exceptions.end())\n\t\t\t\t// This thread is in the exception list. 
Not joining these threads\n\t\t\t\tcontinue;\n\n\t\t\tif (!m_dependencies[thread_key].empty())\n\t\t\t\tfor (const std::string& dependency : m_dependencies[thread_key])\n\t\t\t\t\tdependencies_to_analyze.push_back(dependency);\n\t\t\t// Pushing the thread key itself we want to wait for and then we'll\n\t\t\t// push its dependencies in front of it so that we wait for the dependencies first\n\t\t\tdependencies_to_wait_for.push_front(thread_key);\n\n\t\t\twhile (!dependencies_to_analyze.empty())\n\t\t\t{\n\t\t\t\tstd::string new_dependency = dependencies_to_analyze.front();\n\t\t\t\tdependencies_to_analyze.pop_front();\n\t\t\t\tdependencies_to_wait_for.push_front(new_dependency);\n\n\t\t\t\tconst std::unordered_set<std::string>& dependencies = m_dependencies[new_dependency];\n\t\t\t\tfor (const std::string& dependency : dependencies)\n\t\t\t\t\tdependencies_to_analyze.push_front(dependency);\n\t\t\t}\n\n\t\t\tstd::unordered_set<std::string> dependencies_already_joined;\n\t\t\tfor (const std::string& dependency : dependencies_to_wait_for)\n\t\t\t{\n\t\t\t\tif (dependencies_already_joined.find(dependency) == dependencies_already_joined.end())\n\t\t\t\t{\n\t\t\t\t\t// Dependency not joined yet\n\t\t\t\t\tdependencies_already_joined.insert(dependency);\n\t\t\t\t\tjoin_threads(dependency);\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tstatic void detach_threads(const std::string& key)\n\t{\n\t\tauto find = m_threads_map.find(key);\n\t\tif (find == m_threads_map.end())\n\t\t\treturn;\n\n\t\tfor (std::thread& thread : find->second)\n\t\t\tif (thread.joinable())\n\t\t\t\tthread.detach();\n\t}\n\n\t/**\n\t * Adds a dependency on 'dependency_key' from 'key' such that all the threads started with key\n\t * 'key' only start after all threads from 'dependency_key' are finished\n\t */\n\tstatic void add_dependency(const std::string& key, const std::string& dependency_key)\n\t{\n\t\tm_dependencies[key].insert(dependency_key);\n\t}\n\nprivate:\n\ttemplate <class _Fn, class... 
_Args>\n\tstatic void start_with_dependencies(const std::unordered_set<std::string>& dependencies, const std::string& thread_key_to_start, _Fn function, _Args... args)\n\t{\n\t\t// These threads have a dependency\n\n\t\t// Executing the given function after waiting for the dependencies\n\t\tif (m_monothread)\n\t\t{\n\t\t\t// Waiting for the dependencies before starting the thread\n\t\t\twait_for_dependencies(dependencies);\n\n\t\t\tstart_serial_thread(thread_key_to_start, function, args...);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Starting a thread that will wait for the dependencies before calling the given function\n\t\t\tm_threads_map[thread_key_to_start].push_back(std::thread([thread_key_to_start, dependencies, function, args...]() \n\t\t\t{\n\t\t\t\twait_for_dependencies(dependencies);\n\n\t\t\t\tstd::thread function_thread(function, args...);\n\t\t\t\tfunction_thread.join();\n\t\t\t}));\n\t\t}\n\t}\n\n\tstatic void wait_for_dependencies(const std::unordered_set<std::string>& dependencies)\n\t{\n\t\tfor (const std::string& dependency : dependencies)\n\t\t\tjoin_threads(dependency);\n\t}\n\nprivate:\n\t// If true, the ThreadManager will execute all threads serially\n\tstatic bool m_monothread;\n\n\t// The states are used to keep the data that the threads need alive\n\tstatic std::unordered_map<std::string, std::shared_ptr<void>> m_threads_states;\n\n\t// The ThreadManager can hold as many thread as we want and to find the thread\n\t// we want amongst all the threads stored, we use keys, hence the unordered_map\n\tstatic std::unordered_map<std::string, std::vector<std::thread>> m_threads_map;\n\n\t// Because of the dependency management system, it may be possible that we call .join()\n\t// on the same thread (or threads with the same thread key) from multiple threads.\n\t// Calling .join() concurrently on the same thread is likely to result in a race condition\n\t// so we need a synchronization system, using mutexes\n\tstatic std::unordered_map<std::string, 
std::mutex> m_join_mutexes;\n\n\t// For each thread key, maps to a vector of the dependencies of these threads\n\t// (thread with the thread key given as key to the map)\n\tstatic std::unordered_map<std::string, std::unordered_set<std::string>> m_dependencies;\n};\n\n#endif\n"
  },
  {
    "path": "src/Threads/ThreadState.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef THREAD_STATE_H\n#define THREAD_STATE_H\n\nstruct TextureLoadingThreadState\n{\n    std::vector<std::pair<aiTextureType, std::string>> texture_paths;\n    std::vector<int> material_indices;\n\n    std::string scene_filepath;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ApplicationSettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef APPLICATION_SETTINGS_H\n#define APPLICATION_SETTINGS_H\n\n#include <string>\n#include <vector>\n\n#include \"UI/DisplayView/DisplaySettings.h\"\n#include \"UI/DisplayView/DisplayViewEnum.h\"\n\n#include \"HostDeviceCommon/RenderSettings.h\"\n\nstruct ApplicationSettings\n{\n\tstatic constexpr bool DENOISER_USE_INTEROP_BUFFERS_DEFAULT = false;\n\n\tbool enable_denoising = false;\n\tbool denoiser_use_interop_buffers = DENOISER_USE_INTEROP_BUFFERS_DEFAULT;\n\tbool denoiser_use_albedo = true;\n\tbool denoiser_denoise_albedo = true;\n\tbool denoiser_use_normals = true;\n\tbool denoiser_denoise_normals = true;\n\t// How many samples were we at when we last denoised a frame\n\tint last_denoised_sample_count = -1;\n\t// How many microseconds did it take to denoise (last time we denoised)?\n\tfloat last_denoised_duration = 0.0f;\n\t// Denoise only when that maximum sample count is reached\n\tbool denoise_when_rendering_done = true;\n\t// How many frames to wait for before denoising (this basically reduces \n\t// the performance penalty of denoising each frame).\n\tint denoiser_sample_skip = 0;\n\t// If the denoiser settings changed since last frame\n\tbool denoiser_settings_changed = false;\n\n\t// How much to divide the rotation by when the mouse\n\t// has been dragged over the window to move the camera\n\t// This is necessary because if 1 pixel of movement equalled\n\t// 1 degree of rotation, it would be way too fast!\n\tdouble view_rotation_sldwn_x = 3.5f, view_rotation_sldwn_y = 3.5f;\n\n\t// How much to scale the render resolution by. 
\n\t// For example, if == 2, and the viewport currently is 1280*720, \n\t// the path tracer will compute a 2560*1440 image and display it\n\t// in the 1280*720 viewport\n\tfloat render_resolution_scale = 1.0f;\n\t// This variable is meant to keep the GPU busy when using \"automatic number of samples\"\n\t// per frame. The idea is to adjust the number of samples per frame such that the GPU\n\t// always has a bunch of work to do.\n\t// For example, let's say that after a while, the adaptive sampling has judged that only\n\t// 1000 pixels are left to converge out of the ~2M of 1080p image. 1000 pixels to ray trace\n\t// is a joke for the GPU. It's going to be extremely fast. So fast that the application is\n\t// going to be CPU bound for displaying the image etc...\n\t// To avoid being CPU bound, we adjust the work of the GPU such that it still has a significant\n\t// amount of work to process.\n\t// The amount of work is adjusted by adjusting the number of samples per frame. We adjust the\n\t// samples per frame such that the GPU takes (1000ms / target_GPU_framerate) milliseconds\n\t// to compute a frame\n\tfloat target_GPU_framerate = 10.0f;\n\t// If > 0.0f, stalls the GPU for a certain amount of time (based on the percentage and the\n\t// time taken to render the last frame). This feature is only there to help limit GPU heating\n\t// at the cost of longer render times\n\tfloat GPU_stall_percentage = 0.0f;\n\n\t// Whether or not to keep the same resolution on\n\t// viewport rescale. 
This means that the render resolution\n\t// scale will be automatically adjusted\n\t// This is useful if you want a bigger window on your desktop\n\t// without having the resolution going up and your GPU kneeling in pain\n\tbool keep_same_resolution = false;\n\n\t// When keep_same_resolution = true, we're going to automatically \n\t// adjust the resolution scaling so that the viewport_width * resolution_scaling\n\t// and viewport_height * resolution_scaling = target_width and target_height\n\t// respectively. The values of target_width and target_height are set when the\n\t// user ticks the 'keep same resolution' checkbox in ImGui\n\tint target_width = 0, target_height = 0;\n\n\t// We stop rendering when this number of sample is reached.\n\t// 0 is no limit\n\tint max_sample_count = 0;\n\t// We stop rendering when the render has been running for that long.\n\t// In seconds. 0 is no limit\n\tfloat max_render_time = 0.0f;\n\n\t// if true, the number of samples per frame will be adjusted automatically to target 20 FPS. \n\t// This is meant to keep the GPU busy mostly when adaptive sampling is on.\n\t// This is because with adaptive sampling on, FPS will keep increasing as the number of\n\t// pixels that yet have to converge decreases. And with high FPS count, we get the risk\n\t// of being CPU bound since we'll have to display many frames per second.\n\tbool auto_sample_per_frame = true;\n\n\t// How many samples to render before evaluating the number of pixels that have reached\n\t// the noise threshold.\n\t// \n\t// This setting only applies to the \"pixel stop noise threshold\" feature.\n\t// It does not apply to adaptive sampling.\n\t// Adaptive sampling has its own minimum sample count\n\tint pixel_stop_noise_threshold_min_sample_count = 64;\n};\n\n#endif"
  },
  {
    "path": "src/UI/ApplicationState.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef APPLICATION_STATE_H\n#define APPLICATION_STATE_H\n\nstruct ApplicationState\n{\n\tfloat last_CPU_frame_delta_time_ms = 0.0f;\n\t// GLFW timestamp of when was the last time that we submitted a frame to render to the GPU.\n\tuint64_t last_GPU_submit_time = 0;\n\t// How long the current render has been running for in milliseconds\n\tfloat current_render_time_ms = 0.0f;\n\t// Samples per second (computed at each frame based on the number of\n\t// samples per frame and the time to render the last frame)\n\tfloat samples_per_second = 0.0f;\n\t// Set to true if some settings of the render changed and we need\n\t// to restart rendering from sample 0\n\tbool render_dirty = true;\n\t// If true, this means that the user was interacting with the camera\n\t// at last frame\n\tbool interacting_last_frame = false;\n\n\t// How long in milliseconds do we still have to stall the GPU for\n\tfloat GPU_stall_duration_left = 0;\n\n\t// How many times renderer->render() was called since the last dirty frame.\n\tint frame_number = 0;\n\n\t// If true, the viewport is going to be refreshed next frame no matter what\n\tbool force_viewport_refresh = false;\n\t// How long has passed since the last time we \"uploaded\" the renderer\n\t// frame buffer to OpenGL for display.\n\t// \n\t// This variable is used to minimize how often we upload to OpenGL because\n\t// all of that is expensive.\n\t//\n\t// This is only used for offline rendering and this will have the effect\n\t// of updating the viewport only once every few seconds to save resources\n\tuint64_t last_viewport_refresh_timestamp = 0;\n};\n\n#endif"
  },
  {
    "path": "src/UI/DisplayView/DisplaySettings.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DISPLAY_SETTINGS_H\n#define DISPLAY_SETTINGS_H\n\nstruct DisplaySettings\n{\n\t// If 1.0f, 100% of the denoised result is displayed in the viewport.\n\t// If 0.0f, 100% of the noisy framebuffer is displayed in the viewport\n\t// Linearly interpolated between the two for intermediate values\n\tfloat denoiser_blend = 1.0f;\n\t// Overrides the blending factor for the blend-2-textures display shader\n\t// 0.0f displays 100% of texture 1.\n\t// 1.0f gives 100% of texture 2.\n\t// -1.0f disables the override\n\tfloat blend_override = -1.0f;\n\n\t// Whether or not to do tonemapping for display fragment shaders that support it\n\tbool do_tonemapping = true;\n\t// Tone mapping gamma\n\tfloat tone_mapping_gamma = 2.2f;\n\t// Tone mapping exposure\n\tfloat tone_mapping_exposure = 1.8f;\n\n\t// If true, the white furnace threshold shader will display\n\t// pixels that lose energy as green. Pixels will not be highlighted\n\t// if false\n\tbool white_furnace_display_use_low_threshold = false;\n\t// If true, the white furnace threshold shader will display\n\t// pixels that gain energy as red. Pixels will not be highlighted\n\t// if false\n\tbool white_furnace_display_use_high_threshold = true;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayTextureType.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DISPLAY_TEXTURE_TYPE_H\n#define DISPLAY_TEXTURE_TYPE_H\n\n#include \"GL/glew.h\"\n\nclass DisplayTextureType\n{\npublic:\n\tenum Value\n\t{\n\t\tUNINITIALIZED,\n\t\tFLOAT3,\n\t\tINT\n\t};\n\n\tconstexpr DisplayTextureType() : m_value(Value::FLOAT3) { }\n\tconstexpr DisplayTextureType(Value val) : m_value(val) { }\n\n\tGLint get_gl_internal_format()\n\t{\n\t\tswitch (m_value)\n\t\t{\n\t\tcase DisplayTextureType::FLOAT3:\n\t\t\treturn GL_RGB32F;\n\n\t\tcase DisplayTextureType::INT:\n\t\t\treturn GL_R32I;\n\n\t\tdefault:\n\t\t\tthrow std::runtime_error(\"Invalid value of DisplayTextureType\");\n\t\t}\n\t}\n\n\tGLenum get_gl_format()\n\t{\n\t\tswitch (m_value)\n\t\t{\n\t\tcase DisplayTextureType::FLOAT3:\n\t\t\treturn GL_RGB;\n\n\t\tcase DisplayTextureType::INT:\n\t\t\treturn GL_RED_INTEGER;\n\n\t\tdefault:\n\t\t\tthrow std::runtime_error(\"Invalid value of DisplayTextureType\");\n\t\t}\n\t}\n\n\tGLenum get_gl_type()\n\t{\n\t\tswitch (m_value)\n\t\t{\n\t\tcase DisplayTextureType::FLOAT3:\n\t\t\treturn GL_FLOAT;\n\n\t\tcase DisplayTextureType::INT:\n\t\t\treturn GL_INT;\n\n\t\tdefault:\n\t\t\tthrow std::runtime_error(\"Invalid value of DisplayTextureType\");\n\t\t}\n\t}\n\n\tsize_t sizeof_type()\n\t{\n\t\tswitch (m_value)\n\t\t{\n\t\tcase DisplayTextureType::FLOAT3:\n\t\t\treturn sizeof(float) * 3;\n\n\t\tcase DisplayTextureType::INT:\n\t\t\treturn sizeof(int);\n\n\t\tdefault:\n\t\t\tthrow std::runtime_error(\"Invalid value of DisplayTextureType\");\n\t\t}\n\t}\n\n\tbool operator ==(const DisplayTextureType& other) { return m_value == other.m_value; }\n\tbool operator !=(const DisplayTextureType& other) { return m_value != other.m_value; }\n\nprivate:\n\tValue m_value;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayView.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/DisplayView/DisplayView.h\"\n\nDisplayView::DisplayView(DisplayViewType display_view_type, std::shared_ptr<OpenGLProgram> display_program)\n{\n\tm_display_view_type = display_view_type;\n\tm_display_program = display_program;\n}\n\nstd::shared_ptr<OpenGLProgram> DisplayView::get_display_program()\n{\n\treturn m_display_program;\n}\n\nDisplayViewType DisplayView::get_display_view_type() const\n{\n\treturn m_display_view_type;\n}\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayView.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DISPLAY_VIEW_H\n#define DISPLAY_VIEW_H\n\n#include \"OpenGL/OpenGLProgram.h\"\n#include \"UI/DisplayView/DisplayTextureType.h\"\n#include \"UI/DisplayView/DisplayViewEnum.h\"\n\n#include <memory>\n\nclass DisplayView\n{\npublic:\n\tDisplayView() {};\n\tDisplayView(DisplayViewType display_view_type, std::shared_ptr<OpenGLProgram> display_program);\n\n\tstd::shared_ptr<OpenGLProgram> get_display_program();\n\tDisplayViewType get_display_view_type() const;\n\nprivate:\n\t// What display view type is currently displayed by the system\n\tDisplayViewType m_display_view_type = DisplayViewType::DEFAULT;\n\n\t// Fragment shader + vertex shader used for displaying the view on the viewport\n\tstd::shared_ptr<OpenGLProgram> m_display_program = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayViewEnum.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DISPLAY_VIEW_ENUM_H\n#define DISPLAY_VIEW_ENUM_H\n\n/*\n * Enum used to 'switch' between what to display in the viewport\n */\nenum DisplayViewType\n{\n\tDEFAULT,\n\tGMON_BLEND,\n\tDENOISED_BLEND,\n\tDISPLAY_DENOISER_NORMALS,\n\tDISPLAY_DENOISER_ALBEDO,\n\tPIXEL_CONVERGENCE_HEATMAP,\n\tPIXEL_CONVERGED_MAP,\n\tWHITE_FURNACE_THRESHOLD,\n\tUNDEFINED\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayViewSystem.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/ApplicationSettings.h\"\n#include \"UI/DisplayView/DisplayViewSystem.h\"\n#include \"UI/ImGui/ImGuiLogWindow.h\"\n#include \"UI/RenderWindow.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nDisplayViewSystem::DisplayViewSystem(std::shared_ptr<GPURenderer> renderer, RenderWindow* render_window)\n{\n\tm_renderer = renderer;\n\tm_render_window = render_window;\n\n\t// Creating the texture that will contain the path traced data to be displayed\n\t// by the shader.\n\tglGenTextures(1, &m_display_texture_1.first);\n\tglGenTextures(1, &m_display_texture_2.first);\n\n\t// This empty VAO is necessary on NVIDIA drivers even though\n\t// we're hardcoding our full screen quad in the vertex shader\n\tglCreateVertexArrays(1, &m_vao);\n\n\tOpenGLShader fullscreen_quad_vertex_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/fullscreen_quad.vert\", OpenGLShader::VERTEX_SHADER);\n\tOpenGLShader default_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/default_display.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader blend_2_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/blend_2_display.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader normal_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/normal_display.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader albedo_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/albedo_display.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader adaptive_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/heatmap_int.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader pixel_converged_display_fragment_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/boolmap_int.frag\", OpenGLShader::FRAGMENT_SHADER);\n\tOpenGLShader white_furnace_threshold_shader = 
OpenGLShader(GLSL_SHADERS_DIRECTORY \"/white_furnace_threshold.frag\", OpenGLShader::FRAGMENT_SHADER);\n\n\t// Making shared_ptr<OpenGLProgram>s here because multiple display views may share the same OpenGLProgram\n\tstd::shared_ptr<OpenGLProgram> default_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, default_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> blend_2_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, blend_2_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> normal_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, normal_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> albedo_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, albedo_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> pixel_convergence_heatmap_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, adaptive_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> pixel_converged_display_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, pixel_converged_display_fragment_shader);\n\tstd::shared_ptr<OpenGLProgram> white_furnace_threshold_program = std::make_shared<OpenGLProgram>(fullscreen_quad_vertex_shader, white_furnace_threshold_shader);\n\n\t// Creating all the display views\n\tDisplayView default_display_view = DisplayView(DisplayViewType::DEFAULT, default_display_program);\n\tDisplayView gmon_blend_display_view = DisplayView(DisplayViewType::GMON_BLEND, blend_2_display_program);\n\tDisplayView denoise_blend_display_view = DisplayView(DisplayViewType::DENOISED_BLEND, blend_2_display_program);\n\tDisplayView normals_display_view = DisplayView(DisplayViewType::DISPLAY_DENOISER_NORMALS, normal_display_program);\n\tDisplayView albedo_display_view = DisplayView(DisplayViewType::DISPLAY_DENOISER_ALBEDO, albedo_display_program);\n\tDisplayView 
pixel_convergence_heatmap_display_view = DisplayView(DisplayViewType::PIXEL_CONVERGENCE_HEATMAP, pixel_convergence_heatmap_display_program);\n\tDisplayView pixel_converged_display_view = DisplayView(DisplayViewType::PIXEL_CONVERGED_MAP, pixel_converged_display_program);\n\tDisplayView white_furnace_threshold_view = DisplayView(DisplayViewType::WHITE_FURNACE_THRESHOLD, white_furnace_threshold_program);\n\n\t// Adding the display views to the map\n\tm_display_views[DisplayViewType::DEFAULT] = default_display_view;\n\tm_display_views[DisplayViewType::GMON_BLEND] = gmon_blend_display_view;\n\tm_display_views[DisplayViewType::DENOISED_BLEND] = denoise_blend_display_view;\n\tm_display_views[DisplayViewType::DISPLAY_DENOISER_NORMALS] = normals_display_view;\n\tm_display_views[DisplayViewType::DISPLAY_DENOISER_ALBEDO] = albedo_display_view;\n\tm_display_views[DisplayViewType::PIXEL_CONVERGENCE_HEATMAP] = pixel_convergence_heatmap_display_view;\n\tm_display_views[DisplayViewType::PIXEL_CONVERGED_MAP] = pixel_converged_display_view;\n\tm_display_views[DisplayViewType::WHITE_FURNACE_THRESHOLD] = white_furnace_threshold_view;\n\n\t// Denoiser blend by default if denoising enabled. 
Default view otherwise\n\tDisplayViewType default_display_view_type = DisplayViewType::DEFAULT;\n\tif (m_render_window->get_application_settings()->enable_denoising)\n\t\tdefault_display_view_type = DisplayViewType::DENOISED_BLEND;\n\telse if (m_renderer->get_gmon_render_pass()->is_render_pass_used())\n\t\tdefault_display_view_type = DisplayViewType::GMON_BLEND;\n\telse \n\t\tdefault_display_view_type = DisplayViewType::DEFAULT;\n\n\tqueue_display_view_change(default_display_view_type);\n\tconfigure_framebuffer();\n}\n\nDisplayViewSystem::~DisplayViewSystem()\n{\n\tglDeleteTextures(1, &m_display_texture_1.first);\n\tglDeleteTextures(1, &m_display_texture_2.first);\n\tglDeleteVertexArrays(1, &m_vao);\n}\n\nvoid DisplayViewSystem::configure_framebuffer()\n{\n\tglCreateFramebuffers(1, &m_framebuffer);\n\tglBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);\n\n\t// Creating the texture for drawing to the FBO\n\tglGenTextures(1, &m_fbo_texture);\n\tglBindTexture(GL_TEXTURE_2D, m_fbo_texture);\n\n\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_render_window->get_width(), m_render_window->get_height(), 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);\n\n\t// GL_NEAREST because we don't want to linearly interpolate between those beautiful pixels, THAT'S DISGUSTING!\n\t// We want maximum monte carlo noise crispiness!\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n\n\tglFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_fbo_texture, 0);\n\n\tif (glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE)\n\t{\n\t\t// Proceeds with a victory dance: Dance dance dance dance\n\t\treturn;\n\t}\n\telse\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Incomplete framebuffer in DisplayViewSystem!\");\n\n\t\tUtils::debugbreak();\n\t\tstd::exit(1);\n\t}\n}\n\nvoid DisplayViewSystem::resize_framebuffer()\n{\n\tglDeleteTextures(1, 
&m_fbo_texture);\n\tglGenTextures(1, &m_fbo_texture);\n\tglBindTexture(GL_TEXTURE_2D, m_fbo_texture);\n\n\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, m_render_window->get_width(), m_render_window->get_height(), 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);\n\n\t// GL_NEAREST because we don't want to linearly interpolate between those beautiful pixels, THAT'S DISGUSTING!\n\t// We want maximum monte carlo noise crispiness!\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n\n\tglBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);\n\tglFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_fbo_texture, 0);\n}\n\nbool DisplayViewSystem::update_selected_display_view()\n{\n\tif (current_display_view_needs_adaptive_sampling_buffers()\n\t&& !m_render_window->get_renderer()->get_render_settings().has_access_to_adaptive_sampling_buffers())\n\t\t// If the adaptive sampling heatmap is selected as the current view but\n\t\t// the adaptive sampling buffers are no longer available (after a change\n\t\t// to ImGui for example), we need to switch out of the adaptive sampling\n\t\t// view because we don't have the buffers to display it anymore\n\t\tm_queued_display_view_change = DisplayViewType::DEFAULT;\n\n\tif (m_queued_display_view_change != DisplayViewType::UNDEFINED)\n\t{\n\t\t// Adjusting the denoiser setting according to the selected view\n\t\t// so if the user just selected the denoiser blend display view,\n\t\t// enabling the denoising\n\t\t//\n\t\t// If the user changed the view and this is not the denoiser blend view,\n\t\t// this disables denoising\n\t\t// m_render_window->get_application_settings()->enable_denoising = m_queued_display_view_change == DisplayViewType::DENOISED_BLEND;\n\n\t\tm_current_display_view = 
&m_display_views[m_queued_display_view_change];\n\n\t\tinternal_recreate_display_textures_from_display_view(m_queued_display_view_change);\n\n\t\tm_queued_display_view_change = DisplayViewType::UNDEFINED;\n\n\t\treturn true;\n\t}\n\n\thandle_automatic_display_view_changes();\n\t\n\treturn false;\n}\n\nvoid DisplayViewSystem::handle_automatic_display_view_changes()\n{\n\tif (m_current_display_view->get_display_view_type() == DisplayViewType::GMON_BLEND && !m_renderer->get_gmon_render_pass()->is_render_pass_used())\n\t{\n\t\t// But GMoN blend is used while GMoN isn't active, we need to go back to default\n\t\tqueue_display_view_change(DisplayViewType::DEFAULT);\n\t\tupdate_selected_display_view();\n\t}\n}\n\nbool DisplayViewSystem::current_display_view_needs_adaptive_sampling_buffers()\n{\n\treturn get_current_display_view_type() == DisplayViewType::PIXEL_CONVERGENCE_HEATMAP\n\t\t|| get_current_display_view_type() == DisplayViewType::PIXEL_CONVERGED_MAP;\n}\n\nvoid DisplayViewSystem::display()\n{\n\tTracyGpuZone(\"Display\");\n\n\tglBindFramebuffer(GL_FRAMEBUFFER, m_framebuffer);\n\n\t// Binding an empty VAO here (empty because we're hardcoding our full-screen quad vertices\n\t// in our vertex shader) because this is required on NVIDIA drivers\n\tglBindVertexArray(m_vao);\n\tglDrawArrays(GL_TRIANGLES, 0, 6);\n}\n\nDisplayViewType DisplayViewSystem::get_current_display_view_type()\n{\n\tif (m_current_display_view == nullptr)\n\t\treturn DisplayViewType::UNDEFINED;\n\n\treturn m_current_display_view->get_display_view_type();\n}\n\nconst DisplayView* DisplayViewSystem::get_current_display_view() const\n{\n\treturn m_current_display_view;\n}\n\nstd::shared_ptr<OpenGLProgram> DisplayViewSystem::get_active_display_program()\n{\n\treturn m_current_display_view->get_display_program();\n}\n\nDisplaySettings& DisplayViewSystem::get_display_settings()\n{\n\treturn m_display_settings;\n}\n\nvoid DisplayViewSystem::update_display_program_uniforms(const DisplayViewSystem* 
display_view_system, std::shared_ptr<OpenGLProgram> program, std::shared_ptr<GPURenderer> renderer, std::shared_ptr<ApplicationSettings> application_settings)\n{\n\tconst DisplayView* display_view = display_view_system->get_current_display_view();\n\tconst DisplaySettings& display_settings = display_view_system->m_display_settings;\n\t\n\tHIPRTRenderSettings render_settings = renderer->get_render_settings();\n\trender_settings.sample_number = std::max(1u, render_settings.sample_number); \n\n\tbool display_low_resolution = display_view_system->get_render_low_resolution();\n\tint render_low_resolution_scaling = display_low_resolution ? render_settings.render_low_resolution_scaling : 1;\n\n\tprogram->use();\n\n\tswitch (display_view->get_display_view_type())\n\t{\n\tcase DisplayViewType::DEFAULT:\n\t{\n\t\tint sample_number;\n\t\tif (application_settings->enable_denoising && application_settings->last_denoised_sample_count != -1)\n\t\t\t// If we have denoising enabled, the viewport may not be updated at each frame.\n\t\t\t// This means that we may be displaying the same denoised buffer for multiple frames\n\t\t\t// and that same denoised buffer is only going to have a given amount of samples accumulated\n\t\t\t// in it so we must use that number of samples for displaying otherwise things are going\n\t\t\t// to be too dark because we're going to be dividing the data of the denoised buffer by a\n\t\t\t// sample count that doesn't match\n\t\t\tsample_number = application_settings->last_denoised_sample_count;\n\t\telse if (renderer->get_gmon_render_pass()->is_render_pass_used())\n\t\t\tsample_number = renderer->get_gmon_render_pass()->get_last_recomputed_sample_count();\n\t\telse\n\t\t\tsample_number = render_settings.sample_number;\n\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_sample_number\", sample_number);\n\t\tprogram->set_uniform(\"u_do_tonemapping\", 
display_settings.do_tonemapping);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_gamma\", display_settings.tone_mapping_gamma);\n\t\tprogram->set_uniform(\"u_exposure\", display_settings.tone_mapping_exposure);\n\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::WHITE_FURNACE_THRESHOLD:\n\t{\n\t\tint sample_number;\n\t\tif (application_settings->enable_denoising && application_settings->last_denoised_sample_count != -1)\n\t\t\tsample_number = application_settings->last_denoised_sample_count;\n\t\telse\n\t\t\tsample_number = render_settings.sample_number;\n\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_sample_number\", sample_number);\n\t\tprogram->set_uniform(\"u_do_tonemapping\", display_settings.do_tonemapping);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_gamma\", display_settings.tone_mapping_gamma);\n\t\tprogram->set_uniform(\"u_exposure\", display_settings.tone_mapping_exposure);\n\t\tprogram->set_uniform(\"u_use_low_threshold\", display_settings.white_furnace_display_use_low_threshold);\n\t\tprogram->set_uniform(\"u_use_high_threshold\", display_settings.white_furnace_display_use_high_threshold);\n\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::GMON_BLEND:\n\t{\n\t\tint gmon_sample_number = renderer->get_gmon_render_pass()->get_last_recomputed_sample_count();\n\t\tint default_sample_number = render_settings.sample_number;\n\n\t\tprogram->set_uniform(\"u_blend_factor\", renderer->get_gmon_render_pass()->get_gmon_data().gmon_blend_factor);\n\t\tprogram->set_uniform(\"u_texture_1\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_texture_2\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2);\n\t\tprogram->set_uniform(\"u_sample_number_1\", default_sample_number);\n\t\tprogram->set_uniform(\"u_sample_number_2\", 
gmon_sample_number);\n\t\tprogram->set_uniform(\"u_do_tonemapping\", display_settings.do_tonemapping);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_gamma\", display_settings.tone_mapping_gamma);\n\t\tprogram->set_uniform(\"u_exposure\", display_settings.tone_mapping_exposure);\n\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::DENOISED_BLEND:\n\t{\n\t\tint noisy_sample_number;\n\t\tint denoised_sample_number;\n\n\t\tnoisy_sample_number = render_settings.sample_number;\n\t\tdenoised_sample_number = application_settings->last_denoised_sample_count;\n\n\t\tif (display_settings.blend_override != -1.0f)\n\t\t\tprogram->set_uniform(\"u_blend_factor\", display_settings.blend_override);\n\t\telse\n\t\t\tprogram->set_uniform(\"u_blend_factor\", display_settings.denoiser_blend);\n\t\tprogram->set_uniform(\"u_texture_1\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_texture_2\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2);\n\t\tprogram->set_uniform(\"u_sample_number_1\", noisy_sample_number);\n\t\tprogram->set_uniform(\"u_sample_number_2\", denoised_sample_number);\n\t\tprogram->set_uniform(\"u_do_tonemapping\", display_settings.do_tonemapping);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_gamma\", display_settings.tone_mapping_gamma);\n\t\tprogram->set_uniform(\"u_exposure\", display_settings.tone_mapping_exposure);\n\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::DISPLAY_DENOISER_ALBEDO:\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\n\t\tbreak;\n\n\tcase DisplayViewType::DISPLAY_DENOISER_NORMALS:\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", 
render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_do_tonemapping\", display_settings.do_tonemapping);\n\t\tprogram->set_uniform(\"u_gamma\", display_settings.tone_mapping_gamma);\n\t\tprogram->set_uniform(\"u_exposure\", display_settings.tone_mapping_exposure);\n\n\t\tbreak;\n\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\t{\n\t\tstd::vector<ColorRGB32F> color_stops = { ColorRGB32F(0.0f, 0.0f, 1.0f), ColorRGB32F(0.0f, 1.0f, 0.0f), ColorRGB32F(1.0f, 0.0f, 0.0f) };\n\n\t\t// If we don't have adaptive sampling enabled, we want to display the convergence\n\t\t// of pixels as soon as possible so we set the min_val to 1. Otherwise, if we're using\n\t\t// adaptive sampling, we only have the convergence information after the minimum\n\t\t// adaptive sampling samples have been reached so we set that as the min_val\n\t\tfloat min_val = render_settings.enable_adaptive_sampling ? (float)render_settings.adaptive_sampling_min_samples : 1;\n\t\tfloat max_val = std::max((float)render_settings.sample_number, min_val);\n\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_color_stops\", 3, (float*)color_stops.data());\n\t\tprogram->set_uniform(\"u_nb_stops\", 3);\n\t\tprogram->set_uniform(\"u_min_val\", min_val);\n\t\tprogram->set_uniform(\"u_max_val\", max_val);\n\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\t{\n\t\tfloat min_val = render_settings.enable_adaptive_sampling ? 
(float)render_settings.adaptive_sampling_min_samples : 1;\n\n\t\t// If a pixel has a lower sample count than the threshold val, then it has converged\n\t\tfloat threshold_val = std::max((float)render_settings.sample_number, min_val);\n\n\t\tprogram->set_uniform(\"u_texture\", DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tprogram->set_uniform(\"u_resolution_scaling\", render_low_resolution_scaling);\n\t\tprogram->set_uniform(\"u_threshold_val\", threshold_val);\n\t\tbreak;\n\t}\n\n\tcase DisplayViewType::UNDEFINED:\n\t\tbreak;\n\t}\n}\n\nvoid DisplayViewSystem::update_current_display_program_uniforms()\n{\n\tDisplayViewSystem::update_display_program_uniforms(this, get_active_display_program(), m_renderer, m_render_window->get_application_settings());\n}\n\nvoid DisplayViewSystem::upload_relevant_buffers_to_texture()\n{\n\tDisplayViewType current_display_view_type = get_current_display_view_type();\n\n\tswitch (current_display_view_type)\n\t{\n\tcase DisplayViewType::GMON_BLEND:\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_default_interop_framebuffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_color_interop_framebuffer(), m_display_texture_2, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2);\n\t\tbreak;\n\n\tcase DisplayViewType::DENOISED_BLEND:\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_color_interop_framebuffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_denoised_interop_framebuffer(), m_display_texture_2, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2);\n\t\tbreak;\n\n\tcase DisplayViewType::DISPLAY_DENOISER_ALBEDO:\n\t\tif (m_render_window->get_application_settings()->denoiser_use_interop_buffers)\n\t\t\tinternal_upload_buffer_to_texture(m_renderer->get_denoiser_albedo_AOV_interop_buffer(), m_display_texture_1, 
DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\telse\n\t\t\tinternal_upload_buffer_to_texture(m_renderer->get_denoiser_albedo_AOV_no_interop_buffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\n\t\tbreak;\n\n\tcase DisplayViewType::DISPLAY_DENOISER_NORMALS:\n\t\tif (m_render_window->get_application_settings()->denoiser_use_interop_buffers)\n\t\t\tinternal_upload_buffer_to_texture(m_renderer->get_denoiser_normals_AOV_interop_buffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\telse\n\t\t\tinternal_upload_buffer_to_texture(m_renderer->get_denoiser_normals_AOV_no_interop_buffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\n\t\tbreak;\n\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_pixels_converged_sample_count_buffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tbreak;\n\n\tcase DisplayViewType::DEFAULT:\n\tcase DisplayViewType::WHITE_FURNACE_THRESHOLD:\n\tdefault:\n\t\tinternal_upload_buffer_to_texture(m_renderer->get_default_interop_framebuffer(), m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1);\n\t\tbreak;\n\t}\n}\n\nvoid DisplayViewSystem::queue_display_view_change(DisplayViewType display_view)\n{\n\tm_queued_display_view_change = display_view;\n}\n\nvoid DisplayViewSystem::set_render_low_resolution(bool low_resolution_or_not)\n{\n\tm_displaying_low_resolution = low_resolution_or_not;\n}\n\nbool DisplayViewSystem::get_render_low_resolution() const\n{\n\treturn m_displaying_low_resolution;\n}\n\nvoid DisplayViewSystem::resize(int new_render_width, int new_render_height)\n{\n\tresize_framebuffer();\n\tinternal_recreate_display_texture(m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1, m_display_texture_1.second, new_render_width, new_render_height);\n\tinternal_recreate_display_texture(m_display_texture_2, 
DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2, m_display_texture_2.second, new_render_width, new_render_height);\n}\n\nvoid DisplayViewSystem::internal_recreate_display_textures_from_display_view(DisplayViewType display_view)\n{\n\tDisplayTextureType texture_1_type_needed = DisplayTextureType::UNINITIALIZED;\n\tDisplayTextureType texture_2_type_needed = DisplayTextureType::UNINITIALIZED;\n\n\tswitch (display_view)\n\t{\n\tcase DisplayViewType::DEFAULT:\n\tcase DisplayViewType::DISPLAY_DENOISER_NORMALS:\n\tcase DisplayViewType::DISPLAY_DENOISER_ALBEDO:\n\tcase DisplayViewType::WHITE_FURNACE_THRESHOLD:\n\t\ttexture_1_type_needed = DisplayTextureType::FLOAT3;\n\t\tbreak;\n\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\t\ttexture_1_type_needed = DisplayTextureType::INT;\n\t\tbreak;\n\n\tcase DisplayViewType::GMON_BLEND:\n\tcase DisplayViewType::DENOISED_BLEND:\n\t\ttexture_1_type_needed = DisplayTextureType::FLOAT3;\n\t\ttexture_2_type_needed = DisplayTextureType::FLOAT3;\n\t\tbreak;\n\n\tdefault:\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Unhandled display texture type in 'internal_recreate_display_textures_from_display_view'\");\n\n\t\tUtils::debugbreak();\n\n\t\tbreak;\n\t}\n\n\tif (m_display_texture_1.second != texture_1_type_needed)\n\t\tinternal_recreate_display_texture(m_display_texture_1, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_1, texture_1_type_needed, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\n\tif (m_display_texture_2.second != texture_2_type_needed)\n\t\tinternal_recreate_display_texture(m_display_texture_2, DisplayViewSystem::DISPLAY_TEXTURE_UNIT_2, texture_2_type_needed, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n}\n\nvoid DisplayViewSystem::internal_recreate_display_texture(std::pair<GLuint, DisplayTextureType>& display_texture, GLenum display_texture_unit, DisplayTextureType new_texture_type, int width, int 
height)\n{\n\tbool freeing = false;\n\tif (new_texture_type == DisplayTextureType::UNINITIALIZED)\n\t{\n\t\tif (display_texture.second != DisplayTextureType::UNINITIALIZED)\n\t\t{\n\t\t\t// If the texture was valid before and we've given UNINITIALIZED as the new type, this means\n\t\t\t// that we're not using the texture anymore. We're going to queue_resize the texture to 1x1,\n\t\t\t// essentially freeing it but without really destroying the OpenGL object\n\t\t\twidth = height = 1;\n\n\t\t\t// Not changing the texture type, just resizing\n\t\t\tnew_texture_type = display_texture.second;\n\n\t\t\tfreeing = true;\n\t\t}\n\t\telse\n\t\t\t// Else, the texture is already UNINITIALIZED\n\t\t\treturn;\n\t}\n\n\tGLint internal_format = new_texture_type.get_gl_internal_format();\n\tGLenum format = new_texture_type.get_gl_format();\n\tGLenum type = new_texture_type.get_gl_type();\n\n\t// Making sure the buffer isn't bound\n\tglBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);\n\n\tglActiveTexture(GL_TEXTURE0 + display_texture_unit);\n\tglBindTexture(GL_TEXTURE_2D, display_texture.first);\n\tglTexImage2D(GL_TEXTURE_2D, 0, internal_format, width, height, 0, format, type, nullptr);\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);\n\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);\n\n\tif (freeing)\n\t\t// If we just freed the texture, setting it as UNINITIALIZED so that it is basically invalidated\n\t\t// and will be recreated correctly next time\n\t\tdisplay_texture.second = DisplayTextureType::UNINITIALIZED;\n\telse\n\t\tdisplay_texture.second = new_texture_type;\n}\n"
  },
  {
    "path": "src/UI/DisplayView/DisplayViewSystem.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef DISPLAY_VIEW_SYSTEM_H\n#define DISPLAY_VIEW_SYSTEM_H\n\n#include \"Renderer/GPURenderer.h\"\n#include \"OpenGL/OpenGLInteropBuffer.h\"\n#include \"UI/DisplayView/DisplayView.h\"\n#include \"UI/DisplayView/DisplayViewEnum.h\"\n\n#include <unordered_map>\n\nclass RenderWindow;\n\nclass DisplayViewSystem\n{\npublic:\n\t// Default texture unit for displaying most of things\n\tstatic constexpr int DISPLAY_TEXTURE_UNIT_1 = 1;\n\t// Second display texture used when we want to blend between two displays\n\tstatic constexpr int DISPLAY_TEXTURE_UNIT_2 = 2;\n\t// Texture unit reserved for the compute shader screenshoter\n\tstatic constexpr int DISPLAY_COMPUTE_IMAGE_UNIT = 3;\n\n\tDisplayViewSystem(std::shared_ptr<GPURenderer> renderer, RenderWindow* render_window);\n\t~DisplayViewSystem();\n\n\tvoid configure_framebuffer();\n\tvoid resize_framebuffer();\n\n\tDisplayViewType get_current_display_view_type();\n\tconst DisplayView* get_current_display_view() const;\n\tstd::shared_ptr<OpenGLProgram> get_active_display_program();\n\tDisplaySettings& get_display_settings();\n\n\t/**\n\t * Applies queued changes (such as changing the display view for example)\n\t * \n\t * Returns true if the display view was changed. 
False otherwise\n\t */\n\tbool update_selected_display_view();\n\n\t/**\n\t * Returns true if the current display view needs the adaptive sampling buffers for\n\t * displaying\n\t */\n\tbool current_display_view_needs_adaptive_sampling_buffers();\n\n\t/**\n\t * Displays the currently active texture view onto the viewport\n\t */\n\tvoid display();\n\n\t/**\n\t * Queues a change of display view that will take effect upon calling update()\n\t */\n\tvoid queue_display_view_change(DisplayViewType display_view);\n\n\tbool get_render_low_resolution() const;\n\t/**\n\t * Sets whether or not the next display() call will display the given texture\n\t * as a low render resolution texture or not\n\t */\n\tvoid set_render_low_resolution(bool low_resolution_or_not);\n\n\tvoid resize(int new_render_width, int new_render_height);\n\n\t/**\n\t * Updates the uniforms of an arbitrary input program given the state of the renderer and the application settings given\n\t */\n\tstatic void update_display_program_uniforms(const DisplayViewSystem* display_view_system, std::shared_ptr<OpenGLProgram> program, std::shared_ptr<GPURenderer> renderer, std::shared_ptr<ApplicationSettings> application_settings);\n\n\t/**\n\t * Updates the uniforms of the display program currently used by this display view system\n\t */\n\tvoid update_current_display_program_uniforms();\n\tvoid upload_relevant_buffers_to_texture();\n\nprivate:\n\ttemplate <typename T>\n\tvoid internal_upload_buffer_to_texture(std::shared_ptr<OpenGLInteropBuffer<T>> buffer, const std::pair<GLuint, DisplayTextureType>& display_texture, int texture_unit);\n\n\ttemplate<typename T>\n\tvoid internal_upload_buffer_to_texture(std::shared_ptr<OrochiBuffer<T>> buffer, const std::pair<GLuint, DisplayTextureType>& display_texture, int texture_unit);\n\n\t/*\n\t * This function ensures that the display texture is of the proper format\n\t * for the display view selected.\n\t *\n\t * For example, if the user decided to display normals in the 
viewport, we'll need\n\t * the display texture to be a float3 (RGB32F) texture. If the user is displaying\n\t * the adaptive sampling heatmap, we'll only need an integer texture.\n\t *\n\t * This function deletes/recreates the texture every time its required format changes\n\t * (i.e. when the current texture was a float3 and we asked for an integer texture)\n\t * because we don't want to keep every single possible texture in VRAM. This may cause\n\t * a (very) small stutter but that's probably expected since we're asking for a different view\n\t * to show up in the viewport\n\t */\n\tvoid internal_recreate_display_textures_from_display_view(DisplayViewType display_view);\n\tvoid internal_recreate_display_texture(std::pair<GLuint, DisplayTextureType>& display_texture, GLenum display_texture_unit, DisplayTextureType new_texture_type, int width, int height);\n\n\t/**\n\t * Automatically changes the display view used if some conditions are met (or not met).\n\t * \n\t * For example, if the current display view is \"GMoN Blend\" but the user disables GMoN, we\n\t * don't want to keep using the GMoN blend view so this function will change it automatically\n\t */\n\tvoid handle_automatic_display_view_changes();\n\n\n\n\t// All the different display views that can be used for displaying\n\tstd::unordered_map<DisplayViewType, DisplayView> m_display_views;\n\t// Pointer to the display view currently being used\n\tDisplayView* m_current_display_view = nullptr;\n\t// If != UNDEFINED, then someone has requested a display view change and the display view change will be applied upon calling update().\n\t// Why is this necessary and why not just change the DisplayView directly?\n\t//\t\t- Picture this scenario: we're currently displaying the default display view.\n\t//\t\t- The display view is immediately changed to the AdaptiveSamplingMap view.\n\t//\t\t- These two display views use different display texture types. 
The default display view\n\t//\t\t\tuses a float3 texture type whereas the AdaptiveSamplingMap view uses a int texture type\n\t//\t\t- Changing the display view will thus trigger a display texture re-creation (to change the type of the texture)\n\t//\t\t- This texture re-creation means that the current texture (which has just been recreated) contains no data\n\t//\t\t\tand data needs to be uploaded to it. However, data is only uploaded when a kernel frame render is completed\n\t//\t\t\t(and not at every RenderWindow run() loop iteration).\n\t//\t\t- If our current (asynchronous) kernel frame isn't completed, then we will keep displaying --> we will display\n\t//\t\t\twith a texture that didn't get new data uploaded to it --> black viewport\n\t//\t\t- This is why we need to queue the change so that the texture change is only made when a kernel frame is completed.\n\tDisplayViewType m_queued_display_view_change = DisplayViewType::UNDEFINED;\n\n\t// Whether or not the DisplayView used is going to be displaying at low resolution or not\n\tbool m_displaying_low_resolution = false;\n\n\t// Display textures & their display type\n\t// \n\t// The display type is the format of the texel of the texture used by the display program.\n\t// This is useful because we have several types of programs using several\n\t// types of textures. 
For example, displaying normals on the screen requires float3 textures\n\t// whereas displaying a heatmap requires only a texture whose texels are scalar (floats or ints).\n\t// This means that, depending on the display view selected, we're going to have to use the proper\n\t// OpenGL texture format type and that's what the DisplayTextureType is for.\n\t// \n\t// The textures should be the same resolution as the render resolution.\n\t// They have nothing to do with the resolution of the viewport.\n\t// \n\t// The first texture is used by the display program to draw on the fullscreen quad.\n\t// Also used as the first blending texture when a blending display view is selected\n\tstd::pair<GLuint, DisplayTextureType> m_display_texture_1 = { -1, DisplayTextureType::UNINITIALIZED };\n\n\t// Second display texture.\n\t// Used as the second texture for blending when a blending display view is selected\n\t// (used by the denoiser blending for example)\n\tstd::pair<GLuint, DisplayTextureType> m_display_texture_2 = { -1, DisplayTextureType::UNINITIALIZED };\n\n\t// We don't need a VAO because we're hardcoding our fullscreen\n\t// quad vertices in our vertex shader but we still need an empty/fake\n\t// VAO for NVIDIA drivers to avoid errors\n\tGLuint m_vao;\n\n\t// Framebuffer we're drawing. 
We're not directly drawing to the back buffer because we\n\t// want ImGui to do the drawing in one of its ImGui window\n\tGLuint m_framebuffer;\npublic:\n\tGLuint m_fbo_texture;\n\nprivate:\n\tDisplaySettings m_display_settings;\n\tstd::shared_ptr<GPURenderer> m_renderer = nullptr;\n\tRenderWindow* m_render_window = nullptr;\n};\n\ntemplate<typename T>\nvoid DisplayViewSystem::internal_upload_buffer_to_texture(std::shared_ptr<OpenGLInteropBuffer<T>> buffer, const std::pair<GLuint, DisplayTextureType>& display_texture, int texture_unit)\n{\n\tif (buffer == nullptr)\n\t\treturn;\n\n\tbuffer->unmap();\n\tbuffer->unpack_to_GL_texture(display_texture.first, GL_TEXTURE0 + texture_unit, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, display_texture.second);\n}\n\ntemplate<typename T>\nvoid DisplayViewSystem::internal_upload_buffer_to_texture(std::shared_ptr<OrochiBuffer<T>> buffer, const std::pair<GLuint, DisplayTextureType>& display_texture, int texture_unit)\n{\n\tif (buffer == nullptr)\n\t\treturn;\n\n\tbuffer->unpack_to_GL_texture(display_texture.first, GL_TEXTURE0 + texture_unit, m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y, display_texture.second);\n}\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiAnimationWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/ImGui/ImGuiAnimationWindow.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"imgui.h\"\n\nconst char* ImGuiAnimationWindow::TITLE = \"Animation\";\n\nvoid ImGuiAnimationWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n\n\tm_renderer = m_render_window->get_renderer();\n}\n\nvoid ImGuiAnimationWindow::draw()\n{\n\tImGui::Begin(ImGuiAnimationWindow::TITLE);\n\n\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n\n\tdraw_header();\n\tdraw_frame_sequence_rendering_panel();\n\tdraw_camera_panel();\n\tdraw_envmap_panel();\n\n\tImGui::PopItemWidth();\n\n\tImGui::End();\n}\n\nvoid ImGuiAnimationWindow::draw_header()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tRendererAnimationState& animation_state = m_renderer->get_animation_state();\n\n\tImGui::SeparatorText(\"General settings\");\n\tif (ImGui::Checkbox(\"Accumulate\", &render_settings.accumulate))\n\t{\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tif (!render_settings.accumulate)\n\t\t{\n\t\t\tm_render_window->get_application_settings()->auto_sample_per_frame = false;\n\t\t\trender_settings.samples_per_frame = 1;\n\t\t}\n\t}\n\n\tstd::string animation_button_text = animation_state.do_animations ? \"Disable animations\" : \"Enable animations\";\n\tif (ImGui::Button(animation_button_text.c_str()))\n\t\tanimation_state.do_animations = !animation_state.do_animations;\n\tif (m_renderer->get_render_settings().accumulate && !animation_state.is_rendering_frame_sequence)\n\t{\n\t\tImGui::TreePush(\"Animations info tree\");\n\t\tImGui::Text(\"Warning: \");\n\t\tImGuiRenderer::show_help_marker(\"Animations are not playing right now because\\n\"\n\t\t\t\t\t\"accumulation is on. 
Nothing can move while accumulation\\n\"\n\t\t\t\t\t\"is on unless you're rendering a frame sequence, in\\n\"\n\t\t\t\t\t\"which case animations will step forward after a frame\\n\"\n\t\t\t\t\t\"is rendered (converged according to the renderer settings).\");\n\t\tImGui::TreePop();\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n}\n\nvoid ImGuiAnimationWindow::draw_frame_sequence_rendering_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Frame Sequence Rendering\"))\n\t{\n\t\tRendererAnimationState& animation_state = m_renderer->get_animation_state();\n\n\t\tImGui::TreePush(\"Frame Sequence Rendering Tree\");\n\n\t\tImGui::Text(\"Currently at frame %d / %d\", animation_state.frames_rendered_so_far, animation_state.number_of_animation_frames);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tstatic int move_n_frames_forward = 0;\n\t\tImGui::InputInt(\"Move N frames forward\", &move_n_frames_forward); \n\t\tImGuiRenderer::show_help_marker(\"Advances all the animations N frames forward.\");\n\t\tImGui::SameLine();\n\t\tImGui::BeginDisabled(animation_state.do_animations == false);\n\t\tif (ImGui::Button(\"Go!\"))\n\t\t{\n\t\t\tbool can_step_backup = animation_state.can_step_animation;\n\n\t\t\tanimation_state.can_step_animation = true;\n\t\t\tfor (int i = 0; i < move_n_frames_forward; i++)\n\t\t\t\tm_renderer->step_animations(16.67f);\n\n\t\t\tanimation_state.can_step_animation = can_step_backup;\n\t\t\tanimation_state.frames_rendered_so_far += move_n_frames_forward;\n\t\t\tmove_n_frames_forward = 0;\n\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGui::EndDisabled();\n\t\tif (animation_state.do_animations == false)\n\t\t\tImGuiRenderer::show_help_marker(\"Disabled because animations are not enabled right now.\");\n\n\t\tif (ImGui::InputInt(\"Number of frames to render\", &animation_state.number_of_animation_frames))\n\t\t\tanimation_state.reset();\n\n\t\tImGui::BeginDisabled(!m_renderer->get_render_settings().accumulate);\n\t\tstd::string 
start_rendering_animation_text = animation_state.is_rendering_frame_sequence ? \"Stop rendering frame sequence\" : \"Start rendering frame sequence\";\n\t\tif (ImGui::Button(start_rendering_animation_text.c_str()))\n\t\t{\n\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tanimation_state.is_rendering_frame_sequence = !animation_state.is_rendering_frame_sequence;\n\t\t\tanimation_state.reset();\n\t\t}\n\t\tif (!m_renderer->get_render_settings().accumulate)\n\t\t\tImGuiRenderer::show_help_marker(\"Feature disabled because accumulation is not enabled.\");\n\t\telse\n\t\t\tImGuiRenderer::show_help_marker(\"Starts rendering a sequence of frame. After each frame has \"\n\t\t\t\t\t\t\t\t\t\t\t\"converged (according to the various stopping conditions set in \"\n\t\t\t\t\t\t\t\t\t\t\t\"\\\"Settings -> Render Settings\\\"), a screenshot is dumped to \"\n\t\t\t\t\t\t\t\t\t\t\t\"the disk, the animations are step and the next frame starts \"\n\t\t\t\t\t\t\t\t\t\t\t\"rendering.\");\n\t\tImGui::EndDisabled();\n\n\t\tif (!m_renderer->get_render_settings().accumulate)\n\t\t{\n\t\t\tImGui::TreePush(\"Enable accumulation for start rendering frame sequence tree\");\n\t\t\tif (ImGui::Button(\"Enable accumulation\"))\n\t\t\t{\n\t\t\t\tm_renderer->get_render_settings().accumulate = true;\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::TreePop();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n}\n\nvoid ImGuiAnimationWindow::draw_camera_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Camera\"))\n\t{\n\t\tImGui::TreePush(\"Camera animation tree\");\n\n\t\tCamera& camera = m_renderer->get_camera();\n\t\tCameraAnimation& camera_animation = m_renderer->get_camera_animation();\n\n\t\tImGui::Checkbox(\"Animate\", &camera_animation.animate);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::CollapsingHeader(\"Rotate around object\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Rotate around object camera animation tree\");\n\n\t\t\tstatic bool 
default_rotation_set = false;\n\t\t\tstatic int selected_object = 0;\n\n\t\t\tImGui::Checkbox(\"Rotate around object during animation\", &camera_animation.m_do_rotation_animation);\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::Text(\"Rotate around object\");\n\t\t\tif (ImGui::BeginListBox(\"##rotate_around_objects\", ImVec2(-FLT_MIN, 7 * ImGui::GetTextLineHeightWithSpacing())))\n\t\t\t{\n\t\t\t\tconst std::vector<std::string>& mesh_names = m_renderer->get_mesh_names();\n\t\t\t\tconst std::vector<std::string>& material_names = m_renderer->get_material_names();\n\t\t\t\tfor (int n = 0; n < mesh_names.size(); n++)\n\t\t\t\t{\n\t\t\t\t\tconst bool is_selected = (selected_object == n);\n\n\t\t\t\t\tconst std::string& mesh_name = mesh_names[n];\n\t\t\t\t\tconst std::string& material_name = material_names[m_renderer->get_mesh_material_indices()[n]];\n\t\t\t\t\tstd::string object_text = mesh_name + \" (\" + material_name + \")\";\n\t\t\t\t\tif (ImGui::Selectable(object_text.c_str(), is_selected))\n\t\t\t\t\t{\n\t\t\t\t\t\tselected_object = n;\n\n\t\t\t\t\t\tfloat3 object_center = m_renderer->get_mesh_bounding_boxes()[n].get_center();\n\t\t\t\t\t\tcamera_animation.m_rotate_around_point = glm::vec3(object_center.x, object_center.y, object_center.z);\n\t\t\t\t\t}\n\n\t\t\t\t\t// Set the initial focus when opening the combo (scrolling + keyboard navigation focus)\n\t\t\t\t\tif (is_selected)\n\t\t\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t\t\t}\n\t\t\t\tImGui::EndListBox();\n\t\t\t}\n\n\t\t\tif (!default_rotation_set)\n\t\t\t{\n\t\t\t\tif (m_renderer->get_mesh_bounding_boxes().size() > 0)\n\t\t\t\t{\n\t\t\t\t\tfloat3 default_rotate_around_point = m_renderer->get_mesh_bounding_boxes()[0].get_center();\n\t\t\t\t\tcamera_animation.m_rotate_around_point = glm::vec3(default_rotate_around_point.x, default_rotate_around_point.y, default_rotate_around_point.z);\n\n\t\t\t\t\tdefault_rotation_set = true;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 
20.0f));\n\t\t\tImGui::SeparatorText(\"Rotation options\");\n\n\t\t\tstatic float& rotation_value = camera_animation.m_rotation_value;\n\t\t\tstatic CameraRotationType& rotation_type = camera_animation.m_rotation_type;\n\t\t\tif (ImGui::RadioButton(\"##second_per_rotation\", (int*)&rotation_type, 0))\n\t\t\t\trotation_value = 8.0f;\n\t\t\tImGui::SameLine();\n\t\t\tImGui::BeginDisabled(rotation_type != 0);\n\t\t\tif (ImGui::SliderFloat(\"Rotation duration (seconds per 360 degrees)\", &rotation_value, 2.0f, 10.0f))\n\t\t\t\trotation_value = std::max(0.001f, rotation_value);\n\t\t\tImGuiRenderer::show_help_marker(\"The camera will take that much time to rotate \"\n\t\t\t\t\t\t\t\t\t\t\t\"by 360 degrees. This is probably what you want \"\n\t\t\t\t\t\t\t\t\t\t\t\"for real time (no accumulation) camera animation.\");\n\t\t\tImGui::EndDisabled();\n\n\t\t\tif (ImGui::RadioButton(\"##degrees_per_frame\", (int*)&rotation_type, 1))\n\t\t\t\trotation_value = 1.0f;\n\t\t\tImGui::SameLine();\n\t\t\tImGui::BeginDisabled(rotation_type != 1);\n\t\t\tImGui::SliderFloat(\"Rotation speed (degrees per frame)\", &rotation_value, 0.0f, 90.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"The camera will rotate by the given degrees \"\n\t\t\t\t\t\t\t\t\t\t\t\"at each frame. 
This is probably what you want \"\n\t\t\t\t\t\t\t\t\t\t\t\"for frame sequence rendering.\");\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGuiSettingsWindow::draw_camera_panel_static(\"Camera Settings\", m_render_window, m_renderer);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiAnimationWindow::draw_envmap_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Environment Map\"))\n\t{\n\t\tImGui::TreePush(\"Envmap animation window tree\");\n\n\t\tbool& animate_envmap = m_renderer->get_envmap().animate;\n\t\tfloat& animation_speed_X = m_renderer->get_envmap().animation_speed_X;\n\t\tfloat& animation_speed_Y = m_renderer->get_envmap().animation_speed_Y;\n\t\tfloat& animation_speed_Z = m_renderer->get_envmap().animation_speed_Z;\n\n\t\tImGui::Checkbox(\"Animate\", &animate_envmap);\n\n\t\tif (animate_envmap)\n\t\t{\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::Text(\"Speeds are in degrees per second\");\n\t\t\tImGui::SliderFloat(\"Animation Speed X\", &animation_speed_X, 0.0f, 360.0f);\n\t\t\tImGui::SliderFloat(\"Animation Speed Y\", &animation_speed_Y, 0.0f, 360.0f);\n\t\t\tImGui::SliderFloat(\"Animation Speed Z\", &animation_speed_Z, 0.0f, 360.0f);\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiAnimationWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_ANIMATION_WINDOW_H\n#define IMGUI_ANIMATION_WINDOW_H\n\n#include \"Renderer/GPURenderer.h\"\n\nclass RenderWindow;\n\nclass ImGuiAnimationWindow\n{\npublic:\n\tstatic const char* TITLE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid draw();\n\tvoid draw_header();\n\tvoid draw_camera_panel();\n\tvoid draw_envmap_panel();\n\tvoid draw_frame_sequence_rendering_panel();\n\nprivate:\n\tRenderWindow* m_render_window = nullptr;\n\n\tstd::shared_ptr<GPURenderer> m_renderer;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiLogWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/ImGui/ImGuiLogWindow.h\"\n#include \"UI/RenderWindow.h\"\n\nconst char* ImGuiLogWindow::TITLE = \"Logs\";\nconst float ImGuiLogWindow::BASE_SIZE = 250.0f;\n\nextern ImGuiLogger g_imgui_logger;\n\nvoid ImGuiLogWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n}\n\nvoid ImGuiLogWindow::draw()\n{\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 10.0f));\n\n\tImGui::Begin(ImGuiLogWindow::TITLE, nullptr);\n\n\tg_imgui_logger.draw(ImGuiLogWindow::TITLE);\n\n\tImGui::PopStyleVar(3);\n\tImGui::End();\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiLogWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_LOG_WINDOW_H\n#define IMGUI_LOG_WINDOW_H\n\n#include \"UI/ImGui/ImGuiLogger.h\"\n\n#include \"imgui.h\"\n\nclass RenderWindow;\n\nclass ImGuiLogWindow\n{\npublic:\n\tstatic const char* TITLE;\n\tstatic const float BASE_SIZE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid draw();\n\nprivate:\n\tRenderWindow* m_render_window;\n\n\tImVec2 m_current_size;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiLogger.cpp",
    "content": "#include \"UI/ImGui/ImGuiLogger.h\"\n\nImGuiLogger g_imgui_logger;\n\nconst char* ImGuiLogger::BACKGROUND_KERNEL_PARSING_LINE_NAME = \"BackgroundKernelParsingLineName\";\nconst char* ImGuiLogger::BACKGROUND_KERNEL_COMPILATION_LINE_NAME = \"BackgroundKernelPrecompilationLineName\";\n\nImGuiLogger::ImGuiLogger()\n{\n    clear();\n}\n\nImGuiLogger::~ImGuiLogger()\n{\n    m_destroyed = true;\n}\n\nvoid ImGuiLogger::add_line_with_name(ImGuiLoggerSeverity severity, const char* line_name, const char* fmt, ...)\n{\n    va_list args;\n    va_start(args, fmt);\n    add_line_internal(severity, line_name, fmt, args);\n    va_end(args);\n}\n\nvoid ImGuiLogger::add_line(ImGuiLoggerSeverity severity, const char* fmt, ...)\n{\n    va_list args;\n    va_start(args, fmt);\n    add_line_internal(severity, nullptr, fmt, args);\n    va_end(args);\n}\n\nvoid ImGuiLogger::draw(const char* title, bool* p_open)\n{\n    if (!ImGui::Begin(title, p_open))\n    {\n        ImGui::End();\n        return;\n    }\n\n    // Options menu\n    if (ImGui::BeginPopup(\"Options\"))\n    {\n        ImGui::Checkbox(\"Auto-scroll\", &m_auto_scroll);\n        ImGui::EndPopup();\n    }\n\n    // Main window\n    if (ImGui::Button(\"Options\"))\n        ImGui::OpenPopup(\"Options\");\n    ImGui::SameLine();\n    bool clear_button = ImGui::Button(\"Clear\");\n    ImGui::SameLine();\n    bool copy = ImGui::Button(\"Copy\");\n    ImGui::SameLine();\n    m_text_filter.Draw(\"Filter\", -100.0f);\n    ImGui::SameLine();\n\n    ImGui::Separator();\n\n    if (ImGui::BeginChild(\"scrolling\", ImVec2(0, 0), ImGuiChildFlags_None, ImGuiWindowFlags_HorizontalScrollbar))\n    {\n        if (clear_button)\n            clear();\n        if (copy)\n            ImGui::LogToClipboard();\n        if (m_log_lines.size() == 0)\n        {\n            ImGui::EndChild();\n            ImGui::End();\n\n            return;\n        }\n\n        ImGui::PushStyleVar(ImGuiStyleVar_ItemSpacing, ImVec2(0, 0));\n        if 
(m_text_filter.IsActive())\n        {\n            // In this example we don't use the clipper when Filter is enabled.\n            // This is because we don't have random access to the result of our filter.\n            // A real application processing logs with ten of thousands of entries may want to store the result of\n            // search/filter.. especially if the filtering function is not trivial (e.g. reg-exp).\n            for (int line_no = 0; line_no < m_actual_lines.size(); line_no++)\n            {\n                std::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*> line_view_pair = get_line_from_index(line_no);\n\n                std::shared_ptr<ImGuiLoggerLine> line = line_view_pair.first;\n                std::string_view* str_view = line_view_pair.second;\n\n                const char* line_start = str_view->data();\n                const char* line_end = line_start + str_view->length();\n\n                if (m_text_filter.PassFilter(line_start, line_end))\n                {\n                    ImGui::PushStyleColor(ImGuiCol_Text, ImGuiLogger::get_severity_color(line->severity));\n                    ImGui::TextUnformatted(line_start, line_end);\n                    ImGui::PopStyleColor();\n                }\n            }\n        }\n        else\n        {\n            // The simplest and easy way to display the entire buffer:\n            //   ImGui::TextUnformatted(buf_begin, buf_end);\n            // And it'll just work. TextUnformatted() has specialization for large blob of text and will fast-forward\n            // to skip non-visible lines. Here we instead demonstrate using the clipper to only process lines that are\n            // within the visible area.\n            // If you have tens of thousands of items and their processing cost is non-negligible, coarse clipping them\n            // on your side is recommended. 
Using ImGuiListClipper requires\n            // - A) random access into your data\n            // - B) items all being the  same height,\n            // both of which we can handle since we have an array pointing to the beginning of each line of text.\n            // When using the filter (in the block of code above) we don't have random access into the data to display\n            // anymore, which is why we don't use the clipper. Storing or skimming through the search result would make\n            // it possible (and would be recommended if you want to search through tens of thousands of entries).\n            ImGuiListClipper clipper;\n            clipper.Begin(m_total_number_of_lines);\n            while (clipper.Step())\n            {\n                for (int line_no = clipper.DisplayStart; line_no < clipper.DisplayEnd; line_no++)\n                {\n                    std::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*> line_view_pair = get_line_from_index(line_no);\n\n                    std::shared_ptr<ImGuiLoggerLine> line = line_view_pair.first;\n                    std::string_view* str_view = line_view_pair.second;\n\n                    const char* line_start = str_view->data();\n                    const char* line_end = line_start + str_view->length();\n\n                    ImGui::PushStyleColor(ImGuiCol_Text, ImGuiLogger::get_severity_color(line->severity));\n                    ImGui::TextUnformatted(line_start, line_end);\n                    ImGui::PopStyleColor();\n                }\n            }\n            clipper.End();\n        }\n        ImGui::PopStyleVar();\n\n        // Keep up at the bottom of the scroll region if we were already at the bottom at the beginning of the frame.\n        // Using a scrollbar or mouse-wheel will take away from the bottom edge.\n        if (m_auto_scroll && ImGui::GetScrollY() >= ImGui::GetScrollMaxY())\n            ImGui::SetScrollHereY(1.0f);\n    }\n    ImGui::EndChild();\n    
ImGui::End();\n}\n\nvoid ImGuiLogger::clear()\n{\n    m_log_lines.clear();\n    m_actual_lines.clear();\n    m_index_in_actual_lines.clear();\n    m_index_to_line_cache.clear();\n    m_names_to_lines.clear();\n    m_total_number_of_lines = 0;\n}\n\nvoid ImGuiLogger::update_line(const char* line_name, const char* fmt, ...)\n{\n    std::lock_guard<std::mutex> lock(m_mutex);\n\n    if (m_destroyed)\n        return;\n\n    auto find = m_names_to_lines.find(line_name);\n    if (find == m_names_to_lines.end())\n    {\n        add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Cannot update line with name %s. There is no such line. Did you forget to call add_line(severity, LINE_NAME, ...)?\", line_name);\n        return;\n    }\n\n    std::shared_ptr<ImGuiLoggerLine> line = find->second;\n    std::string prefix = ImGuiLogger::get_severity_prefix(line->severity);\n\n    va_list args;\n    va_start(args, fmt);\n    std::string formatted_string = prefix + compute_formatted_string(fmt, args) + \"\\n\";\n    va_end(args);\n\n    // Updating the line\n    line->string = formatted_string;\n\n    // Updating the actual lines\n    int nb_actual_lines_before_update = m_actual_lines[m_index_in_actual_lines[line]].size();\n    compute_actual_lines(line);\n\n    // Updating a line invalidates the cache if the number of actual lines\n    // changed\n    int nb_actual_lines_after_update = m_actual_lines[m_index_in_actual_lines[line]].size();\n    if (nb_actual_lines_before_update != nb_actual_lines_after_update)\n        m_index_to_line_cache.clear();\n\n    m_total_number_of_lines -= nb_actual_lines_before_update;\n    m_total_number_of_lines += nb_actual_lines_after_update;\n}\n\nImU32 ImGuiLogger::get_severity_color(ImGuiLoggerSeverity severity)\n{\n    switch (severity)\n    {\n    case IMGUI_LOGGER_INFO:\n        return IM_COL32(255, 255, 255, 255);\n\n    case IMGUI_LOGGER_WARNING:\n        return IM_COL32(255, 255, 0, 255);\n\n    case IMGUI_LOGGER_ERROR:\n        return 
IM_COL32(255, 0, 0, 255);\n\n    default:\n        return IM_COL32(255, 0, 255, 255);\n    }\n}\n\nstd::string ImGuiLogger::get_severity_prefix(ImGuiLoggerSeverity severity)\n{\n    switch (severity)\n    {\n    case IMGUI_LOGGER_INFO:\n        return \"[INFO] \";\n\n    case IMGUI_LOGGER_WARNING:\n        return \"[WARN] \";\n\n    case IMGUI_LOGGER_ERROR:\n        return \"[ERR ] \";\n\n    default:\n        return \"\";\n    }\n}\n\nvoid ImGuiLogger::add_line_internal(ImGuiLoggerSeverity severity, const char* line_name, const char* fmt, va_list args)\n{\n    // For logger's thread safety\n    std::lock_guard<std::mutex> lock(m_mutex);\n\n    std::string prefix = ImGuiLogger::get_severity_prefix(severity);\n    std::string formatted_string_no_endl = prefix + compute_formatted_string(fmt, args);\n    std::cout << formatted_string_no_endl << std::endl; // Also printing to the console with std::endl to flush the output\n\n    std::string formatted_string = formatted_string_no_endl + \"\\n\";\n    int line_index = m_log_lines.size();\n\n    std::shared_ptr<ImGuiLoggerLine> logger_line = std::make_shared<ImGuiLoggerLine>(formatted_string, severity);\n    m_log_lines.push_back(logger_line);\n    m_index_in_actual_lines[logger_line] = line_index;\n\n    compute_actual_lines(logger_line);\n    m_total_number_of_lines += m_actual_lines[line_index].size();\n\n    if (line_name != nullptr)\n        set_line_name(logger_line, line_name);\n}\n\nvoid ImGuiLogger::set_line_name(std::shared_ptr<ImGuiLoggerLine> line, const char* line_name)\n{\n    m_names_to_lines[line_name] = line;\n}\n\nstd::string ImGuiLogger::compute_formatted_string(const char* fmt, va_list args)\n{\n    // Copying the arg list because the first call to vsnprintf modifies args\n    // and so if we use args again in the second call to vsnprintf, we're going\n    // to get garbage in the formatted output \n    va_list args_copy;\n    va_copy(args_copy, args);\n\n    // Calculating formatted string length by 
calling with NULL. + 1 for the '\\0'\n    int string_length = vsnprintf(NULL, 0, fmt, args_copy) + 1;\n\n    va_end(args_copy);\n\n    std::vector<char> string_buffer(string_length);\n    vsnprintf(string_buffer.data(), string_length, fmt, args);\n\n    return std::string(string_buffer.data());\n}\n\nvoid ImGuiLogger::compute_actual_lines(std::shared_ptr<ImGuiLoggerLine> logger_line)\n{\n    int string_length = logger_line->string.size();\n\n    int index = m_index_in_actual_lines.at(logger_line);\n    if (index >= m_actual_lines.size())\n        // If the entry doesn't exist yet\n        m_actual_lines.push_back(std::vector<std::string_view>());\n\n    std::vector<std::string_view>& actual_lines = m_actual_lines[index];\n    // We're going to recompute the lines so clearing them first\n    actual_lines.clear();\n\n    int previous_line_feed_pos = 0;\n    for (int character_pos = 0; character_pos < string_length; character_pos++)\n    {\n        if (logger_line->string[character_pos] == '\\n')\n        {\n            const char* line_start = logger_line->string.c_str() + previous_line_feed_pos;\n            int line_length = character_pos - previous_line_feed_pos;\n\n            actual_lines.push_back(std::string_view(line_start, line_length));\n\n            // + 1 to skip the '\\n'\n            previous_line_feed_pos = character_pos + 1;\n        }\n    }\n}\n\nstd::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*> ImGuiLogger::get_line_from_index(int index)\n{\n    const auto& find = m_index_to_line_cache.find(index);\n    if (find != m_index_to_line_cache.end())\n        return find->second;\n\n    int total = 0;\n    for (int actual_line_index = 0; actual_line_index < m_actual_lines.size(); actual_line_index++)\n    {\n        std::vector<std::string_view>& actual_lines = m_actual_lines[actual_line_index];\n\n        total += actual_lines.size();\n        if (total > index)\n        {\n            // This means that the line we're looking for is in the 
current ImGuiLoggerLine\n\n            int offset = total - actual_lines.size();\n            int index_in_actual_lines = index - offset;\n\n            std::shared_ptr<ImGuiLoggerLine> logger_line = m_log_lines[actual_line_index];\n            std::string_view* line = &actual_lines[index_in_actual_lines];\n\n            std::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*> pair(logger_line, line);\n            m_index_to_line_cache[index] = pair;\n\n            return pair;\n        }\n\n    }\n\n    return std::make_pair(nullptr, nullptr);\n}"
  },
  {
    "path": "src/UI/ImGui/ImGuiLogger.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_LOGGER_H\n#define IMGUI_LOGGER_H\n\n#include \"UI/ImGui/ImGuiLoggerSeverity.h\"\n#include \"UI/ImGui/ImGuiLoggerLine.h\"\n\n#include \"imgui.h\"\n\n#include <iostream>\n#include <memory>\n#include <mutex>\n#include <string>\n#include <string_view>\n#include <unordered_map>\n#include <vector>\n\n/**\n * Class derived from imgui_demo.cpp \"ExampleAppLog\"\n */\n\nclass ImGuiLogger\n{\npublic:\n    static const char* BACKGROUND_KERNEL_PARSING_LINE_NAME;\n    static const char* BACKGROUND_KERNEL_COMPILATION_LINE_NAME;\n\n    ImGuiLogger();\n    ~ImGuiLogger();\n\n    void add_line_with_name(ImGuiLoggerSeverity severity, const char* line_name, const char* fmt, ...) IM_FMTARGS(4);\n    void add_line(ImGuiLoggerSeverity severity, const char* fmt, ...) IM_FMTARGS(3);\n\n    void draw(const char* title, bool* p_open = NULL);\n    void clear();\n\n    void update_line(const char* line_name, const char* fmt, ...);\n\n    static ImU32 get_severity_color(ImGuiLoggerSeverity severity);\n\nprivate:\n    void add_line_internal(ImGuiLoggerSeverity severity, const char* line_name, const char* fmt, va_list args);\n\n    void set_line_name(std::shared_ptr<ImGuiLoggerLine> line, const char* line_name);\n\n    std::string compute_formatted_string(const char* fmt, va_list args);\n    void compute_actual_lines(std::shared_ptr<ImGuiLoggerLine> logger_line);\n\n    std::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*> get_line_from_index(int index);\n\n    static std::string get_severity_prefix(ImGuiLoggerSeverity severity);\n\n    // Each time you call add_log(), one entry is added in there with the whole text\n    // and severity.\n    // We're using shared_ptr here because when adding new lines to m_log_lines, the\n    // vector may be resized in which case, all instances of ImGuiLoggerLine will be\n    // moved and 
references/pointers that we had on it become invalid.\n    // By using shared_ptr, we're allocating the lines on the heap and thus we always\n    // keep valid references to them\n    std::vector<std::shared_ptr<ImGuiLoggerLine>> m_log_lines;\n\n    // If you call add_log() with a text that contains multiple \"\\n\" (i.e. multiple lines)\n    // each individual line will be added in that vector. This is used for drawing properly\n    // because drawing needs the actual lines separated by \\n, not the \"lines\" that the entire\n    // string that the user gave when calling add_log()\n    std::vector<std::vector<std::string_view>> m_actual_lines;\n    // For a given ImGuiLoggerLine, the value is the index in 'm_actual_lines' of that ImGuiLoggerLine\n    // so this map can be used to retrieve the actual lines (vector of std::string_view)\n    // of an ImGuiLoggerLine\n    std::unordered_map<std::shared_ptr<ImGuiLoggerLine>, int> m_index_in_actual_lines;\n    // Cache for the get_line_from_index() method. 
If we ask for the same index twice, \n    // we can just look in the cache for what the line was for this index.\n    // The cache is invalidated if a call to 'update_line()' modifies the number of\n    // actual lines of an ImGuiLoggerLine (by giving a text that contains more '\\n' than\n    // the previous for example)\n    std::unordered_map<int, std::pair<std::shared_ptr<ImGuiLoggerLine>, std::string_view*>> m_index_to_line_cache;\n    // This variable is equivalent to: for (auto& l : m_actual_lines) total += l.size();\n    int m_total_number_of_lines = 0;\n\n    // User given names to their associated line\n    std::unordered_map<const char*, std::shared_ptr<ImGuiLoggerLine>> m_names_to_lines;\n\n    ImGuiTextFilter m_text_filter;\n\n    bool m_auto_scroll = true;  // Keep scrolling if already at the bottom.\n\n    // For logger thread safety\n    std::mutex m_mutex;\n\n    // Used for threads that may still want to access this logger after\n    // it's been destroyed by another thread\n    bool m_destroyed = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiLoggerLine.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_LOGGER_LINE_H\n#define IMGUI_LOGGER_LINE_H\n\n#include \"UI/ImGui/ImGuiLoggerSeverity.h\"\n\n#include <string>\n\nstruct ImGuiLoggerLine\n{\n\tImGuiLoggerLine(char* line_string, ImGuiLoggerSeverity line_severity) : string(line_string), severity(line_severity) {};\n\tImGuiLoggerLine(const std::string& line_string, ImGuiLoggerSeverity line_severity) : string(line_string), severity(line_severity) {};\n\n\tImGuiLoggerSeverity severity;\n\tstd::string string;\n};\n\n#endif"
  },
  {
    "path": "src/UI/ImGui/ImGuiLoggerSeverity.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_LOGGER_SEVERITY_H\n#define IMGUI_LOGGER_SEVERITY_H\n\nenum ImGuiLoggerSeverity\n{\n    IMGUI_LOGGER_INFO,\n    IMGUI_LOGGER_WARNING,\n    IMGUI_LOGGER_ERROR\n};\n\n#endif"
  },
  {
    "path": "src/UI/ImGui/ImGuiObjectsWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/ImGui/ImGuiObjectsWindow.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"imgui.h\"\n#include \"misc/cpp/imgui_stdlib.h\"\n\nconst char* ImGuiObjectsWindow::TITLE = \"Objects\";\n\nstruct MaterialOverrideState\n{\n\tbool override_base_color = false;\n\tbool override_roughness = false;\n\tbool override_anisotropy = false;\n\tbool override_anisotropy_rotation = false;\n\n\tbool override_specular = false;\n\tbool override_specular_color = false;\n\tbool override_specular_tint_strength = false;\n\tbool override_specular_darkening = false;\n\t\n\tbool override_metallic = false;\n\tbool override_F82_reflectivity = false;\n\tbool override_F90_reflectivity = false;\n\tbool override_F90_falloff_exponent = false;\n\tbool override_second_roughness_weight = false;\n\tbool override_second_roughness = false;\n\n\tbool override_sheen_strength = false;\n\tbool override_sheen_color = false;\n\tbool override_sheen_roughness = false;\n\n\tbool override_coat_strength = false;\n\tbool override_coat_medium_absorption = false;\n\tbool override_coat_medium_thickness = false;\n\tbool override_coat_roughness = false;\n\tbool override_coat_roughening = false;\n\tbool override_coat_darkening = false;\n\tbool override_coat_anisotropy = false;\n\tbool override_coat_anisotropy_rotation = false;\n\tbool override_coat_IOR = false;\n\n\tbool override_transmission = false;\n\tbool override_diffuse_transmission = false;\n\tbool override_IOR = false;\n\tbool override_absorption_distance = false;\n\tbool override_absorption_color = false;\n\tbool override_dispersion_abbe = false;\n\tbool override_dispersion_scale = false;\n\tbool override_dielectric_priority = false;\n\tbool override_thin_material = false;\n\n\tbool override_thin_film = false;\n\tbool override_thin_film_thickness = false;\n\tbool override_thin_film_ior = false;\n\tbool 
override_thin_film_do_ior_override = false;\n\tbool override_thin_film_base_ior_override = false;\n\tbool override_thin_film_kappa_3 = false;\n\tbool override_thin_film_hue_shift = false;\n\n\tbool override_emission = false;\n\tbool override_emission_strength = false;\n\n\tbool override_opacity = false;\n};\n\nvoid ImGuiObjectsWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n\n\tm_renderer = m_render_window->get_renderer();\n}\n\nvoid ImGuiObjectsWindow::draw()\n{\n\tImGui::Begin(ImGuiObjectsWindow::TITLE);\n\n\tdraw_global_objects_panel();\n\tdraw_objects_panel();\n\n\tImGui::End();\n}\n\ntemplate <typename T>\nvoid apply_material_override(bool override_flag, T CPUMaterial::* property, const T& override_value, std::vector<CPUMaterial>& materials_to_override)\n{\n\tif (override_flag)\n\t\tfor (CPUMaterial& renderer_mat : materials_to_override)\n\t\t\trenderer_mat.*property = override_value;\n}\n\nbool draw_material_override_line_common(bool& override_state_bool)\n{\n\tImGui::TableSetColumnIndex(0);\n\tfloat column_width = ImGui::GetColumnWidth();\n\tfloat radio_width = ImGui::GetFrameHeight();\n\tfloat padding = (column_width - radio_width) / 2.0f;\n\n\tstatic unsigned long long int checkbox_id = 0;\n\tImGui::SetCursorPosX(ImGui::GetCursorPosX() + padding);\n\tImGui::PushID(reinterpret_cast<long long int>(&override_state_bool));\n\tbool changed = ImGui::Checkbox(\"##checkbox_mat_override\", &override_state_bool);\n\tImGui::PopID();\n\n\treturn changed;\n}\n\nbool draw_material_override_line(const std::string& text, bool& override_state_bool, float& material_override_property, float v_min, float v_max, const char* format = \"%.3f\")\n{\n\tbool changed = draw_material_override_line_common(override_state_bool);\n\n\tImGui::TableSetColumnIndex(1);\n\tchanged |= ImGui::SliderFloat(text.c_str(), &material_override_property, v_min, v_max, format);\n\n\treturn changed;\n}\n\nbool draw_material_override_line(const std::string& 
text, bool& override_state_bool, int& material_override_property, int v_min, int v_max)\n{\n\tbool changed = draw_material_override_line_common(override_state_bool);\n\n\tImGui::TableSetColumnIndex(1);\n\tchanged |= ImGui::SliderInt(text.c_str(), &material_override_property, v_min, v_max);\n\n\treturn changed;\n}\n\nbool draw_material_override_line(const std::string& text, bool& override_state_bool, ColorRGB32F& material_override_property)\n{\n\tbool changed = draw_material_override_line_common(override_state_bool);\n\n\tImGui::TableSetColumnIndex(1);\n\tchanged |= ImGui::ColorEdit3(text.c_str(), (float*)&material_override_property);\n\n\treturn changed;\n}\n\nbool draw_material_override_line(const std::string& text, bool& override_state_bool, bool& material_override_property)\n{\n\tbool changed = draw_material_override_line_common(override_state_bool);\n\n\tImGui::TableSetColumnIndex(1);\n\tchanged |= ImGui::Checkbox(text.c_str(), &material_override_property);\n\n\treturn changed;\n}\n\nvoid ImGuiObjectsWindow::draw_global_objects_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Global material overrider\"))\n\t\treturn;\n\n\tImGui::TreePush(\"Global material overrider tree\");\n\n\tstd::vector<const char*> items = { \"- None\", \"- Lambertian BRDF\", \"- Oren Nayar BRDF\", \"- Principled BSDF\" };\n\tif (ImGui::Combo(\"All Objects BSDF Override\", m_renderer->get_global_compiler_options()->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::BSDF_OVERRIDE), items.data(), items.size()))\n\t{\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> kernel_options = m_renderer->get_global_compiler_options();\n\tstatic CPUMaterial material_override;\n\tstatic MaterialOverrideState override_state;\n\n\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n\n\n\tbool material_override_changed = false;\n\n\tstatic bool override_all = false;\n\tif 
(ImGui::Checkbox(\"Override All\", &override_all))\n\t{\n\t\tstd::memset(&override_state, override_all, sizeof(MaterialOverrideState));\n\t\tmaterial_override_changed = true;\n\t}\n\tif (ImGui::CollapsingHeader(\"Base Layer\"))\n\t{\n\t\tImGui::TreePush(\"Base layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table base layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 6; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Base color\", override_state.override_base_color, material_override.base_color);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Roughness\", override_state.override_roughness, material_override.roughness, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Anisotropy\", override_state.override_anisotropy, material_override.anisotropy, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Anisotropy rotation\", override_state.override_anisotropy_rotation, material_override.anisotropy_rotation, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 5:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"IOR\", override_state.override_IOR, material_override.ior, 1.0f, 3.0f);\n\t\t\t\t\tif (material_override.ior < 1.0f || material_override.ior > 3.0f && (material_override.do_glass_energy_compensation || material_override.do_specular_energy_compensation))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::SameLine();\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation behavior is undefined for IORs < 1.0f or IORs > 3.0f\", ImVec4(1.0f, 1.0f, 0.0f, 
1.0f));\n\t\t\t\t\t}\n\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Specular layer\"))\n\t{\n\t\tImGui::TreePush(\"Specular layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table specular layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 5; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Specular\", override_state.override_specular, material_override.specular, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Specular color\", override_state.override_specular_color, material_override.specular_color);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Specular tint strength\", override_state.override_specular_tint_strength, material_override.specular_tint, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Specular darkening\", override_state.override_specular_darkening, material_override.specular_darkening, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Metallic Layer\"))\n\t{\n\t\tImGui::TreePush(\"Metallic layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table metallic layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 8; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 
0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Metallic\", override_state.override_metallic, material_override.metallic, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"F0 Reflectivity\", override_state.override_base_color, material_override.base_color);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 0 degree angles: microfacet-normal \"\n\t\t\t\t\t\t\"and view direction perfectly aligned: looking straigth into \"\n\t\t\t\t\t\t\"the object.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"F82 Reflectivity\", override_state.override_F82_reflectivity, material_override.metallic_F82);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 82 degree angles: microfacet-normal \"\n\t\t\t\t\t\t\"and view direction almost orthogonal.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"F90 Reflectivity\", override_state.override_F90_reflectivity, material_override.metallic_F90);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 90 degree angles: microfacet-normal \"\n\t\t\t\t\t\t\"and view direction perfectly orthogonal.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 5:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"F90 Falloff exponent\", override_state.override_F90_falloff_exponent, material_override.metallic_F90_falloff_exponent, 0.5f, 5.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"The \\\"falloff\\\" controls how wide the influence of F90 is.\\n\"\n\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\"The lower the value, the wider F90's effect will be.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 6:\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\t\tmaterial_override_changed |= 
draw_material_override_line(\"Second roughness weight\", override_state.override_second_roughness_weight, material_override.second_roughness_weight, 0.0f, 1.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"The principled BSDF can have two metal lobes. They are exactly the \"\n\t\t\t\t\t\t\"same (F0/F82/F90, Anisotropy, ...) except that they can each have \"\n\t\t\t\t\t\t\"their own roughness.\\n\"\n\t\t\t\t\t\t\"The first metal lobe's roughness is controlled by the general \"\n\t\t\t\t\t\t\"roughness of the material and the second metal lobe's roughness \"\n\t\t\t\t\t\t\"is controlled by 'Second roughness'.\\n\"\n\t\t\t\t\t\t\"The two lobes are then linearly blended together using \"\n\t\t\t\t\t\t\"'Second roughness weight'. 'Second roughness weight' = 1 means \"\n\t\t\t\t\t\t\"that the primary roughness of the material is ignored and there \"\n\t\t\t\t\t\t\"is effectively only the second metallic lobe left.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 7:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Second roughness\", override_state.override_second_roughness, material_override.second_roughness, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Sheen Layer\"))\n\t{\n\t\tImGui::TreePush(\"Sheen layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table sheen layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 4; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Sheen strength\", override_state.override_sheen_strength, material_override.sheen, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= 
draw_material_override_line(\"Sheen color\", override_state.override_sheen_color, material_override.sheen_color);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Sheen roughness\", override_state.override_sheen_roughness, material_override.sheen_roughness, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Coat Layer\"))\n\t{\n\t\tImGui::TreePush(\"Coat layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table coat layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 10; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat strength\", override_state.override_coat_strength, material_override.coat, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat medium absorption\", override_state.override_coat_medium_absorption, material_override.coat_medium_absorption);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat medium thickness\", override_state.override_coat_medium_thickness, material_override.coat_medium_thickness, 0.0f, 15.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat roughness\", override_state.override_coat_roughness, material_override.coat_roughness, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 5:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat roughening\", override_state.override_coat_roughening, material_override.coat_roughening, 0.0f, 
1.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Physical accuracy requires that a rough clearcoat also roughens what's underneath it \"\n\t\t\t\t\t\t\"i.e. the specular/metallic/transmission layers.\\n\"\n\t\t\t\t\t\t\"The option is however given here to artistically disable \"\n\t\t\t\t\t\t\"that behavior by using coat roughening = 0.0f.\");\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 6:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat darkening\", override_state.override_coat_darkening, material_override.coat_darkening, 0.0f, 1.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Because of the total internal reflection that can happen inside the coat layer (i.e. \"\n\t\t\t\t\t\t\"light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the clearcoat will appear will increased \"\n\t\t\t\t\t\t\"saturation.\\n\\n\"\n\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\"This parameter controls the strength of that darkening/increase in saturation.\\n\"\n\t\t\t\t\t\t\"0.0f disables the effect which is non-physically accurate but may be artistically desirable.\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 7:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat anisotropy\", override_state.override_coat_anisotropy, material_override.coat_anisotropy, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 8:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat anisotropy rotation\", override_state.override_anisotropy_rotation, material_override.anisotropy_rotation, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 9:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Coat IOR\", override_state.override_coat_IOR, material_override.coat_ior, 1.0f, 3.0f);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Transmission Layer\"))\n\t{\n\t\tImGui::TreePush(\"Transmission layer material 
tree\");\n\n\t\tif (ImGui::BeginTable(\"Table transmission layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 10; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Diffuse transmission\", override_state.override_diffuse_transmission, material_override.diffuse_transmission, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Specular transmission\", override_state.override_transmission, material_override.specular_transmission, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"IOR\", override_state.override_IOR, material_override.ior, 1.0f, 3.0f);\n\t\t\t\t\tif (material_override.ior < 1.0f || material_override.ior > 3.0f && (material_override.do_glass_energy_compensation || material_override.do_specular_energy_compensation))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::SameLine();\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation behavior is undefined for IORs < 1.0f or IORs > 3.0f\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t\t\t}\n\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Absorption distance\", override_state.override_absorption_distance, material_override.absorption_at_distance, 0.0f, 20.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 5:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Absorption color\", override_state.override_absorption_color, material_override.absorption_color);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 6:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Dispersion Abbe number\", override_state.override_dispersion_abbe, 
material_override.dispersion_abbe_number, 9.0f, 91.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Abbe number for the dispersion of the glass. The lower the number, the stronger the dispersion.\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 7:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Dispersion scale\", override_state.override_dispersion_scale, material_override.dispersion_scale, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 8:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Dielectric priority\", override_state.override_dielectric_priority, material_override.dielectric_priority, 1, StackPriorityEntry::PRIORITY_MAXIMUM);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 9:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin walled\", override_state.override_thin_material, material_override.thin_walled);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Thin-Film Layer\"))\n\t{\n\t\tImGui::TreePush(\"Thin-film layer material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table thin-film layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 8; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin film\", override_state.override_thin_film, material_override.thin_film, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin film thickness\", override_state.override_thin_film_thickness, material_override.thin_film_thickness, 1.0f, 3.0f, \"%.3f nm\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 3:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin film 
IOR\", override_state.override_thin_film_ior, material_override.thin_film_ior, 1.0f, 3.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 4:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin film hue shift\", override_state.override_thin_film_hue_shift, material_override.thin_film_hue_shift_degrees, 0.0f, 360.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 5:\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Override material IOR\", override_state.override_thin_film_do_ior_override, material_override.thin_film_do_ior_override);\n\n\t\t\t\t\t// BeginDisabled for the cases that follow\n\t\t\t\t\tImGui::BeginDisabled(!material_override.thin_film_do_ior_override);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 6:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Eta IOR override\", override_state.override_thin_film_base_ior_override, material_override.thin_film_base_ior_override, 1.0f, 3.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Overrides the eta parameter of the IOR of the base material. This is not physically based but allows for better artistic control.\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 7:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Kappa IOR override\", override_state.override_thin_film_kappa_3, material_override.thin_film_kappa_3, 0.0f, 5.0f);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Overrides the kappa parameter (extinction coefficient) of the base material. 
This is not physically based but allows for better artistic control.\");\n\n\t\t\t\t\t// BeginDisabled in \"case 4:\" and we're guaranteed to go through all cases one by one\n\t\t\t\t\tImGui::EndDisabled();\n\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\t\t\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Emission Properties\"))\n\t{\n\t\tImGui::TreePush(\"Emission material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table base layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 3; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Emission\", override_state.override_emission, material_override.emission);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Emission strength\", override_state.override_emission_strength, material_override.emission_strength, 0.0f, 10.0f);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Other properties\"))\n\t{\n\t\tImGui::TreePush(\"Other properties material tree\");\n\n\t\tif (ImGui::BeginTable(\"Table base layer\", 2, ImGuiTableFlags_SizingFixedFit))\n\t\t{\n\t\t\tfor (int row = 0; row < 3; row++)\n\t\t\t{\n\t\t\t\tImGui::TableNextRow();\n\n\t\t\t\tswitch (row)\n\t\t\t\t{\n\t\t\t\tcase 0:\n\t\t\t\t\tImGui::TableSetColumnIndex(0);\n\t\t\t\t\tImGui::Text(\"Override\");\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 1:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Opacity\", override_state.override_opacity, material_override.alpha_opacity, 0.0f, 1.0f);\n\t\t\t\t\tbreak;\n\n\t\t\t\tcase 
2:\n\t\t\t\t\tmaterial_override_changed |= draw_material_override_line(\"Thin walled\", override_state.override_thin_material, material_override.thin_walled);\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::EndTable();\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n\n\tImGui::PopItemWidth();\n\n\tif (material_override_changed)\n\t{\n\t\tstd::vector<CPUMaterial> overriden_materials = m_renderer->get_original_materials();\n\t\tmaterial_override.make_safe();\n\n\t\tapply_material_override(override_state.override_base_color, &CPUMaterial::base_color, material_override.base_color, overriden_materials);\n\t\tapply_material_override(override_state.override_roughness, &CPUMaterial::roughness, material_override.roughness, overriden_materials);\n\t\tapply_material_override(override_state.override_anisotropy, &CPUMaterial::anisotropy, material_override.anisotropy, overriden_materials);\n\t\tapply_material_override(override_state.override_anisotropy_rotation, &CPUMaterial::anisotropy_rotation, material_override.anisotropy_rotation, overriden_materials);\n\n\t\tapply_material_override(override_state.override_specular, &CPUMaterial::specular, material_override.specular, overriden_materials);\n\t\tapply_material_override(override_state.override_specular_color, &CPUMaterial::specular_color, material_override.specular_color, overriden_materials);\n\t\tapply_material_override(override_state.override_specular_tint_strength, &CPUMaterial::specular_tint, material_override.specular_tint, overriden_materials);\n\t\tapply_material_override(override_state.override_specular_darkening, &CPUMaterial::specular_darkening, material_override.specular_darkening, overriden_materials);\n\n\t\tapply_material_override(override_state.override_metallic, &CPUMaterial::metallic, material_override.metallic, overriden_materials);\n\t\tapply_material_override(override_state.override_F82_reflectivity, &CPUMaterial::metallic_F82, material_override.metallic_F82, 
overriden_materials);\n\t\tapply_material_override(override_state.override_F90_reflectivity, &CPUMaterial::metallic_F90, material_override.metallic_F90, overriden_materials);\n\t\tapply_material_override(override_state.override_F90_falloff_exponent, &CPUMaterial::metallic_F90_falloff_exponent, material_override.metallic_F90_falloff_exponent, overriden_materials);\n\t\tapply_material_override(override_state.override_second_roughness_weight, &CPUMaterial::second_roughness_weight, material_override.second_roughness_weight, overriden_materials);\n\t\tapply_material_override(override_state.override_second_roughness, &CPUMaterial::second_roughness, material_override.second_roughness, overriden_materials);\n\n\t\tapply_material_override(override_state.override_sheen_strength, &CPUMaterial::sheen, material_override.sheen, overriden_materials);\n\t\tapply_material_override(override_state.override_sheen_color, &CPUMaterial::sheen_color, material_override.sheen_color, overriden_materials);\n\t\tapply_material_override(override_state.override_sheen_roughness, &CPUMaterial::sheen_roughness, material_override.sheen_roughness, overriden_materials);\n\n\t\tapply_material_override(override_state.override_coat_strength, &CPUMaterial::coat, material_override.coat, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_medium_absorption, &CPUMaterial::coat_medium_absorption, material_override.coat_medium_absorption, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_medium_thickness, &CPUMaterial::coat_medium_thickness, material_override.coat_medium_thickness, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_roughness, &CPUMaterial::coat_roughness, material_override.coat_roughness, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_roughening, &CPUMaterial::coat_roughening, material_override.coat_roughening, 
overriden_materials);\n\t\tapply_material_override(override_state.override_coat_darkening, &CPUMaterial::coat_darkening, material_override.coat_darkening, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_anisotropy, &CPUMaterial::coat_anisotropy, material_override.coat_anisotropy, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_anisotropy_rotation, &CPUMaterial::coat_anisotropy_rotation, material_override.coat_anisotropy_rotation, overriden_materials);\n\t\tapply_material_override(override_state.override_coat_IOR, &CPUMaterial::coat_ior, material_override.coat_ior, overriden_materials);\n\n\t\tapply_material_override(override_state.override_transmission, &CPUMaterial::diffuse_transmission, material_override.diffuse_transmission, overriden_materials);\n\t\tapply_material_override(override_state.override_transmission, &CPUMaterial::specular_transmission, material_override.specular_transmission, overriden_materials);\n\t\tapply_material_override(override_state.override_IOR, &CPUMaterial::ior, material_override.ior, overriden_materials);\n\t\tapply_material_override(override_state.override_absorption_distance, &CPUMaterial::absorption_at_distance, material_override.absorption_at_distance, overriden_materials);\n\t\tapply_material_override(override_state.override_absorption_color, &CPUMaterial::absorption_color, material_override.absorption_color, overriden_materials);\n\t\tapply_material_override(override_state.override_dispersion_abbe, &CPUMaterial::dispersion_abbe_number, material_override.dispersion_abbe_number, overriden_materials);\n\t\tapply_material_override(override_state.override_dispersion_scale, &CPUMaterial::dispersion_scale, material_override.dispersion_scale, overriden_materials);\n\t\tapply_material_override(override_state.override_dielectric_priority, &CPUMaterial::dielectric_priority, material_override.dielectric_priority, 
overriden_materials);\n\t\tapply_material_override(override_state.override_thin_material, &CPUMaterial::thin_walled, material_override.thin_walled, overriden_materials);\n\n\t\tapply_material_override(override_state.override_thin_film, &CPUMaterial::thin_film, material_override.thin_film, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_thickness, &CPUMaterial::thin_film_thickness, material_override.thin_film_thickness, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_ior, &CPUMaterial::thin_film_ior, material_override.thin_film_ior, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_do_ior_override, &CPUMaterial::thin_film_do_ior_override, material_override.thin_film_do_ior_override, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_base_ior_override, &CPUMaterial::thin_film_base_ior_override, material_override.thin_film_base_ior_override, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_kappa_3, &CPUMaterial::thin_film_kappa_3, material_override.thin_film_kappa_3, overriden_materials);\n\t\tapply_material_override(override_state.override_thin_film_hue_shift, &CPUMaterial::thin_film_hue_shift_degrees, material_override.thin_film_hue_shift_degrees, overriden_materials);\n\n\t\t// Special case for the emission since it's a private member\n\t\tapply_material_override(override_state.override_emission, &CPUMaterial::emission, material_override.emission, overriden_materials);\n\t\tapply_material_override(override_state.override_emission_strength, &CPUMaterial::emission_strength, material_override.emission_strength, overriden_materials);\n\n\t\tapply_material_override(override_state.override_opacity, &CPUMaterial::alpha_opacity, material_override.alpha_opacity, 
overriden_materials);\n\n\t\tm_renderer->update_all_materials(overriden_materials);\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::TreePop();\n}\n\nvoid ImGuiObjectsWindow::draw_objects_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Per object settings\"))\n\t\treturn;\n\n\tImGui::TreePush(\"Objects tree\");\n\n\tstd::vector<CPUMaterial> materials = m_renderer->get_current_materials();\n\tconst std::vector<std::string>& material_names = m_renderer->get_material_names();\n\tconst std::vector<std::string>& mesh_names = m_renderer->get_mesh_names();\n\n\tbool material_changed = false;\n\tstatic int currently_selected_material_index = 0;\n\n\tif (ImGui::CollapsingHeader(\"All objects\"))\n\t{\n\t\tstatic std::string filter_string = \"\";\n\t\t// This set contains all the ids of materials that should be displayed in the\n\t\t// list box. This list is refined based on the search that the user has typed\n\t\t// in to filter the materials\n\t\tstatic std::unordered_set<int> filtered_material_indices;\n\n\t\tImGui::TreePush(\"All objects tree\");\n\n\t\t// This boolean variable is to decide whether or not we need to populate the\n\t\t// 'accepted_material_indices' set\n\t\tbool first_time = filter_string == \"\" && filtered_material_indices.size() == 0 && materials.size() > 0;\n\t\tif (ImGui::InputText(\"Search\", &filter_string) || first_time)\n\t\t\tfiltered_material_indices = filter_displayed_materials(materials.size(), material_names, mesh_names, filter_string);\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tif (ImGui::BeginListBox(\"##all_objects\", ImVec2(-FLT_MIN, 15 * ImGui::GetTextLineHeightWithSpacing())))\n\t\t{\n\t\t\tfor (int material_index = 0; material_index < materials.size(); material_index++)\n\t\t\t{\n\t\t\t\tif (filter_string != \"\")\n\t\t\t\t{\n\t\t\t\t\t// The user has filtered the materials, checking if the current material\n\t\t\t\t\t// has been filtered out or not.\n\t\t\t\t\t//\n\t\t\t\t\t// The 
material isn't filtered out (it is accepted) if its index can be found\n\t\t\t\t\t// in the 'accepted_material_indices' set\n\t\t\t\t\t//\n\t\t\t\t\t// If not, the material has been filetered out\n\t\t\t\t\tif (filtered_material_indices.find(material_index) == filtered_material_indices.end())\n\t\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst bool is_selected = (currently_selected_material_index == material_index);\n\t\t\t\tstd::string text = mesh_names[material_index] + \" (\" + material_names[material_index] + \")\";\n\t\t\t\tif (ImGui::Selectable(text.c_str(), is_selected))\n\t\t\t\t\tcurrently_selected_material_index = material_index;\n\n\t\t\t\t// Set the initial focus when opening the combo (scrolling + keyboard navigation focus)\n\t\t\t\tif (is_selected)\n\t\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t\t}\n\t\t\tImGui::EndListBox();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Emissive objects\"))\n\t{\n\t\tstatic std::string filter_string = \"\";\n\t\t// This set contains all the ids of materials that should be displayed in the\n\t\t// list box. 
This list is refined based on the search that the user has typed\n\t\t// in to filter the materials\n\t\tstatic std::unordered_set<int> filtered_material_indices;\n\n\t\tImGui::TreePush(\"Emissive objects tree\");\n\n\t\tstatic float global_emissive_objects_factor = 1.0f;\n\t\tif (ImGui::SliderFloat(\"Global Emissive Objects Factor\", &global_emissive_objects_factor, 0.0f, 10.0f))\n\t\t{\n\t\t\tfor (CPUMaterial& material : materials)\n\t\t\t{\n\t\t\t\t\tmaterial.global_emissive_factor = global_emissive_objects_factor;\n\n\t\t\t\t\tmaterial.make_safe();\n\t\t\t}\n\n\t\t\tm_renderer->update_all_materials(materials);\n\t\t\tm_renderer->recompute_emissives_power_alias_table();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\n\t\t// This boolean variable is to decide whether or not we need to populate the\n\t\t// 'accepted_material_indices' set\n\t\tbool first_time = filter_string == \"\" && filtered_material_indices.size() == 0 && materials.size() > 0;\n\t\tif (ImGui::InputText(\"Search\", &filter_string) || first_time)\n\t\t\tfiltered_material_indices = filter_displayed_materials(materials.size(), material_names, mesh_names, filter_string);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tif (ImGui::BeginListBox(\"Emissive objects\", ImVec2(-FLT_MIN, 7 * ImGui::GetTextLineHeightWithSpacing())))\n\t\t{\n\t\t\tfor (int material_index = 0; material_index < materials.size(); material_index++)\n\t\t\t{\n\t\t\t\tif (!materials[material_index].is_emissive())\n\t\t\t\t\tcontinue;\n\n\t\t\t\tif (filter_string != \"\")\n\t\t\t\t{\n\t\t\t\t\t// The user has filtered the materials, checking if the current material\n\t\t\t\t\t// has been filtered out or not.\n\t\t\t\t\t//\n\t\t\t\t\t// The material isn't filtered out (it is accepted) if its index can be found\n\t\t\t\t\t// in the 'accepted_material_indices' set\n\t\t\t\t\t//\n\t\t\t\t\t// If not, the material has been filtered out\n\t\t\t\t\tif (filtered_material_indices.find(material_index) == 
filtered_material_indices.end())\n\t\t\t\t\t\tcontinue;\n\t\t\t\t}\n\n\t\t\t\tconst bool is_selected = (currently_selected_material_index == material_index);\n\t\t\t\tif (ImGui::Selectable(material_names[material_index].c_str(), is_selected))\n\t\t\t\t\tcurrently_selected_material_index = material_index;\n\n\t\t\t\t// Set the initial focus when opening the combo (scrolling + keyboard navigation focus)\n\t\t\t\tif (is_selected)\n\t\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t\t}\n\t\t\tImGui::EndListBox();\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tif (materials.size() > 0)\n\t{\n\t\tstd::shared_ptr<GPUKernelCompilerOptions> kernel_options = m_renderer->get_global_compiler_options();\n\t\tCPUMaterial& material = materials[currently_selected_material_index];\n\n\t\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n\n\t\tImGui::Text(\"- \"); ImGui::SameLine();\n\t\tImGui::PushStyleColor(ImGuiCol_Text, ImVec4(0.0f, 0.9f, 0.0f, 1.0f));\n\t\tImGui::Text(\"Selected object\"); ImGui::SameLine();\n\t\tImGui::PopStyleColor();\n\t\tImGui::Text(\": \"); ImGui::SameLine();\n\t\tImGui::Text(\"%s\", material_names[currently_selected_material_index].c_str());\n\n\t\tif (ImGui::CollapsingHeader(\"Base Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Base layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"Base color\", (float*)&material.base_color);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Roughness\", &material.roughness, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Anisotropy\", &material.anisotropy, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Anisotropy rotation\", &material.anisotropy_rotation, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"IOR\", &material.ior, 1.0f, 3.0f);\n\t\t\tif (material.ior < 1.0f || material.ior > 3.0f)\n\t\t\t{\n\t\t\t\tImGui::SameLine();\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation behavior is undefined for IORs < 1.0f or IORs > 
3.0f\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Specular layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Specular layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Specular\", &material.specular, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"Specular color\", (float*)&material.specular_color);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Specular tint strength\", &material.specular_tint, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Specular darkening\", &material.specular_darkening, 0.0f, 1.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Same as coat darkening but for total internal reflection inside the specular layer \"\n\t\t\t\t\"that sits on top of the diffuse base.\");\n\t\t\tif (material.do_specular_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled. This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be globally enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\telse if (material.do_specular_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled for the glossy layer (specular/diffuse). 
This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Glossy layer energy compensation\", &material.do_specular_energy_compensation);\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to do energy compensation for the glossy layer (specular/diffuse) lobe of this material.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Metallic Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Metallic layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Metallic\", &material.metallic, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"F0 Reflectivity\", (float*)&material.base_color);\n\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 0 degree angles: microfacet-normal \"\n\t\t\t\t\"and view direction perfectly aligned: looking straight into \"\n\t\t\t\t\"the object.\");\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"F82 Reflectivity\", (float*)&material.metallic_F82);\n\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 82 degree angles: microfacet-normal \"\n\t\t\t\t\"and view direction almost orthogonal.\");\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"F90 Reflectivity\", (float*)&material.metallic_F90);\n\t\t\tImGuiRenderer::show_help_marker(\"Reflectivity color at 90 degree angles: microfacet-normal \"\n\t\t\t\t\"and view direction perfectly orthogonal.\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"F90 Falloff exponent\", &material.metallic_F90_falloff_exponent, 0.5f, 5.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"The \\\"falloff\\\" controls how wide the influence of F90 is.\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"The lower the value, the wider F90's effect will be.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Second 
roughness weight\", &material.second_roughness_weight, 0.0f, 1.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"The principled BSDF can have two metal lobes. They are exactly the \"\n\t\t\t\t\"same (F0/F82/F90, Anisotropy, ...) except that they can each have \"\n\t\t\t\t\"their own roughness.\\n\"\n\t\t\t\t\"The first metal lobe's roughness is controlled by the general \"\n\t\t\t\t\"roughness of the material and the second metal lobe's roughness \"\n\t\t\t\t\"is controlled by 'Second roughness'.\\n\"\n\t\t\t\t\"The two lobes are then linearly blended together using \"\n\t\t\t\t\"'Second roughness weight'. 'Second roughness weight' = 1 means \"\n\t\t\t\t\"that the primary roughness of the material is ignored and there \"\n\t\t\t\t\"is effectively only the second metallic lobe left.\");\n\n\t\t\tImGui::BeginDisabled(material.second_roughness_weight == 0.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Second roughness\", &material.second_roughness, 0.0f, 1.0f);\n\t\t\tImGui::EndDisabled();\n\n\t\t\tif (material.do_metallic_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled. This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be globally enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\telse if (material.do_metallic_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled for the metallic layer. 
This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Metallic layer energy compensation\", &material.do_metallic_energy_compensation);\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to do energy compensation for the metallic layer of this material.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Sheen Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Sheen layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Sheen strength\", &material.sheen, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"Sheen color\", (float*)&material.sheen_color);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Sheen roughness\", &material.sheen_roughness, 0.0f, 1.0f);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Coat Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Coat layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat strength\", &material.coat, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"Coat medium absorption\", (float*)&material.coat_medium_absorption);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat medium thickness\", &material.coat_medium_thickness, 0.0f, 15.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat roughness\", &material.coat_roughness, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat roughening\", &material.coat_roughening, 0.0f, 1.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Physical accuracy requires that a rough clearcoat also roughens what's underneath it \"\n\t\t\t\t\"i.e. 
the specular/metallic/transmission layers.\\n\"\n\t\t\t\t\"The option is however given here to artistically disable \"\n\t\t\t\t\"that behavior by using coat roughening = 0.0f.\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat darkening\", &material.coat_darkening, 0.0f, 1.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Because of the total internal reflection that can happen inside the coat layer (i.e. \"\n\t\t\t\t\"light bouncing between the coat/BSDF and air/coat interfaces), the BSDF below the clearcoat will appear with increased \"\n\t\t\t\t\"saturation.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This parameter controls the strength of that darkening/increase in saturation.\\n\"\n\t\t\t\t\"0.0f disables the effect which is non-physically accurate but may be artistically desirable.\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat anisotropy\", &material.coat_anisotropy, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat anisotropy rotation\", &material.coat_anisotropy_rotation, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Coat IOR\", &material.coat_ior, 1.0f, 3.0f);\n\t\t\tif (material.do_coat_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled. This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be globally enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\telse if (material.do_coat_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled for the clearcoat layer. 
This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Clearcoat layer energy compensation\", &material.do_coat_energy_compensation);\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to do energy compensation for the clearcoat layer of this material.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Transmission Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Transmission layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Diffuse transmission\", &material.diffuse_transmission, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Specular transmission\", &material.specular_transmission, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"IOR\", &material.ior, 1.0f, 3.0f);\n\t\t\tif (material.ior < 1.0f || material.ior > 3.0f && (material.do_glass_energy_compensation || material.do_specular_energy_compensation))\n\t\t\t{\n\t\t\t\tImGui::SameLine();\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation behavior is undefined for IORs < 1.0f or IORs > 3.0f\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Absorption distance\", &material.absorption_at_distance, 0.0f, 20.0f);\n\t\t\tmaterial_changed |= ImGui::ColorEdit3(\"Absorption color\", (float*)&material.absorption_color);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Dispersion Abbe number\", &material.dispersion_abbe_number, 9.0f, 91.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Abbe number for the dispersion of the glass. 
The lower the number, the stronger the dispersion.\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Dispersion scale\", &material.dispersion_scale, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderInt(\"Dielectric priority\", &material.dielectric_priority, 1, StackPriorityEntry::PRIORITY_MAXIMUM);\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Thin walled\", &material.thin_walled);\n\t\t\tif (material.do_glass_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled. This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be globally enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\telse if (material.do_glass_energy_compensation && kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION) == KERNEL_OPTION_FALSE)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Warning: \");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Energy compensation is globally disabled for the glass layer. 
This material option will have no effect.\\n\"\n\t\t\t\t\t\"Energy compensation can be enabled in \\\"Settings\\\" --> \\\"Sampling\\\" --> \\\"Materials\\\"\", ImVec4(1.0f, 1.0f, 0.0f, 1.0f));\n\t\t\t}\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Glass layer energy compensation\", &material.do_glass_energy_compensation);\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to do energy compensation for the glass layer of this material.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Thin-Film Layer\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Thin film layer material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Thin film\", &material.thin_film, 0.0f, 1.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Thin film thickness\", &material.thin_film_thickness, 0.0f, 2000.0f, \"%.3f nm\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Thin film IOR\", &material.thin_film_ior, 1.0f, 3.0f);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Thin film hue shift\", &material.thin_film_hue_shift_degrees, 0.0f, 360.0f);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Override material IOR\", &material.thin_film_do_ior_override);\n\t\t\tImGui::BeginDisabled(!material.thin_film_do_ior_override);\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Eta IOR override\", &material.thin_film_base_ior_override, 1.0f, 3.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Overrides the eta parameter of the IOR of the base material. This is not physically based but allows for better artistic control.\");\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Kappa IOR override\", &material.thin_film_kappa_3, 0.0f, 5.0f);\n\t\t\tImGuiRenderer::show_help_marker(\"Overrides the kappa parameter (extinction coefficient) of the base material. 
This is not physically based but allows for better artistic control.\");\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Emission Properties\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Emission material tree\");\n\n\t\t\tbool emission_controlled_by_texture = material.emission_texture_index != MaterialConstants::NO_TEXTURE;\n\t\t\tImGui::BeginDisabled(emission_controlled_by_texture);\n\t\t\t\n\t\t\tbool emission_changed = false;\n\t\t\t// TODO we would need to recompute the alias table for the emissive lights here\n\t\t\temission_changed |= ImGui::ColorEdit3(\"Emission\", (float*)&material.emission, ImGuiColorEditFlags_HDR | ImGuiColorEditFlags_Float);\n\t\t\tImGui::EndDisabled();\n\t\t\tif (emission_controlled_by_texture)\n\t\t\t\tImGuiRenderer::show_help_marker(\"Disabled because the emission of this material is controlled by a texture\");\n\n\t\t\t// TODO we would need to recompute the alias table for the emissive lights here\n\t\t\temission_changed |= ImGui::SliderFloat(\"Emission Strength\", &material.emission_strength, 0.0f, 10.0f);\n\n\t\t\tmaterial_changed |= emission_changed;\n\t\t\tif (emission_changed)\n\t\t\t\tm_renderer->get_NEE_plus_plus_render_pass()->reset(false);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Other properties\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Other properties material tree\");\n\n\t\t\tmaterial_changed |= ImGui::SliderFloat(\"Opacity\", &material.alpha_opacity, 0.0f, 1.0f, \"%.3f\", ImGuiSliderFlags_AlwaysClamp);\n\t\t\tmaterial_changed |= ImGui::Checkbox(\"Thin walled\", &material.thin_walled);\n\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::PopItemWidth();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::Separator();\n\n\t\tmaterial_changed |= draw_material_presets(material);\n\n\t\tif 
(material_changed)\n\t\t{\n\t\t\tmaterial.make_safe();\n\n\t\t\tm_renderer->update_one_material(material, currently_selected_material_index);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t}\n\n\tImGui::TreePop();\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n}\n\nstd::unordered_set<int> ImGuiObjectsWindow::filter_displayed_materials(int material_count, const std::vector<std::string>& material_names, const std::vector<std::string>& mesh_names, const std::string& filter_string) const\n{\n\tstd::unordered_set<int> accepted_material_indices;\n\tif (filter_string == \"\")\n\t{\n\t\t// If no filter, all materials are accepted so we're adding them all to the set\n\t\tfor (int i = 0; i < material_count; i++)\n\t\t\taccepted_material_indices.insert(i);\n\n\t\treturn accepted_material_indices;\n\t}\n\n\tauto case_insensitive_string_find = [](const std::string& haystack, const std::string& needle)\n\t{\n\t\tauto found = std::search(\n\t\t\thaystack.begin(), haystack.end(), \n\t\t\tneedle.begin(), needle.end(), \n\t\t\t[](unsigned char char1, unsigned char char2) { return std::toupper(char1) == std::toupper(char2); }\n\t\t);\n\n\t\treturn found != haystack.end();\n\t};\n\n\t// Just pure brute force search...\n\t// Will improve if this ever becomes a serious bottleneck\n\tfor (int material_index = 0; material_index < material_count; material_index++)\n\t\tif (case_insensitive_string_find(material_names[material_index], filter_string) || case_insensitive_string_find(mesh_names[material_index], filter_string))\n\t\t\taccepted_material_indices.insert(material_index);\n\n\treturn accepted_material_indices;\n}\n\nbool ImGuiObjectsWindow::draw_material_presets(CPUMaterial& material)\n{\n\tbool material_changed = false;\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tif (!ImGui::CollapsingHeader(\"Material presets\"))\n\t\treturn false;\n\n\tImGui::TreePush(\"Materials presets tree\");\n\tImGui::Text(\"Metals\");\n\tImGui::Separator();\n\n\t// Reference: [Adobe Standard Material, 
Technical Documentation, Kutz, Hasan, Edmondson]\n\tconst std::vector<std::pair<std::string, std::pair<ColorRGB32F, ColorRGB32F>>> names_to_f0_f82 = {\n\t\t{ \"Silver\", { ColorRGB32F(0.9868f, 0.9830f, 0.9667f), ColorRGB32F(0.9929f, 0.9961f, 1.0000f) } },\n\t\t{ \"Aluminum\", { ColorRGB32F(0.9157f, 0.9226f, 0.9236f), ColorRGB32F(0.9090f, 0.9365f, 0.9596f) } },\n\t\t{ \"Gold\", { ColorRGB32F(1.0000f, 0.7099f, 0.3148f), ColorRGB32F(0.9408f, 0.9636f, 0.9099f) } },\n\t\t{ \"Chromium\", { ColorRGB32F(0.5496f, 0.5561f, 0.5531f), ColorRGB32F(0.7372f, 0.7511f, 0.8170f) } },\n\t\t{ \"Copper\", { ColorRGB32F(1.0000f, 0.6504f, 0.5274f), ColorRGB32F(0.9755f, 0.9349f, 0.9301f) } },\n\t\t{ \"Iron\", { ColorRGB32F(0.8951f, 0.8755f, 0.8154f), ColorRGB32F(0.8551f, 0.8800f, 0.8966f) } },\n\t\t{ \"Mercury\", { ColorRGB32F(0.7815f, 0.7795f, 0.7783f), ColorRGB32F(0.8103f, 0.8532f, 0.9046f) } },\n\t\t{ \"Magnesium\", { ColorRGB32F(0.8918f, 0.8821f, 0.8948f), ColorRGB32F(0.8949f, 0.9147f, 0.9504f) } },\n\t\t{ \"Nickel\", { ColorRGB32F(0.7014f, 0.6382f, 0.5593f), ColorRGB32F(0.8134f, 0.8352f, 0.8725f) } },\n\t\t{ \"Lead\", { ColorRGB32F(0.7363f, 0.7023f, 0.6602f), ColorRGB32F(0.8095f, 0.8369f, 0.8739f) } },\n\t\t{ \"Platinum\", { ColorRGB32F(0.9602f, 0.9317f, 0.8260f), ColorRGB32F(0.9501f, 0.9461f, 0.9352f) } },\n\t\t{ \"Titanium\", { ColorRGB32F(0.4432f, 0.3993f, 0.3599f), ColorRGB32F(0.8627f, 0.9066f, 0.9481f) } },\n\t\t{ \"Zinc\", { ColorRGB32F(0.8759f, 0.8685f, 0.8542f), ColorRGB32F(0.8769f, 0.9037f, 0.9341f) } },\n\t};\n\n\tint line_count = 0;\n\tfor (int i = 0; i < names_to_f0_f82.size(); i++)\n\t{\n\t\tColorRGB32F F0 = names_to_f0_f82[i].second.first;\n\t\tImGui::PushStyleColor(ImGuiCol_Button, ImVec4(F0.r, F0.g, F0.b, 1.0f));\n\t\t// Changing text color from black to white depending on luminance for readability\n\t\tImGui::PushStyleColor(ImGuiCol_Text, F0.luminance() > 0.6f ? 
ImVec4(0.0f, 0.0f, 0.0f, 1.0f) : ImVec4(1.0f, 1.0f, 1.0f, 1.0f));\n\n\t\tif (ImGui::Button(names_to_f0_f82[i].first.c_str(), /* size */ ImVec2(6.0f * ImGui::GetFontSize(), 1.5f * ImGui::GetFontSize())))\n\t\t{\n\t\t\tmaterial_changed = true;\n\n\t\t\tfloat original_roughness = material.roughness;\n\n\t\t\t// Resetting the material\n\t\t\tmaterial = CPUMaterial();\n\n\t\t\t// Applying preset\n\t\t\tmaterial.roughness = original_roughness;\n\t\t\tmaterial.metallic = 1.0f;\n\t\t\tmaterial.base_color = names_to_f0_f82[i].second.first;\n\t\t\tmaterial.metallic_F82 = names_to_f0_f82[i].second.second;\n\t\t}\n\n\t\tImGui::PopStyleColor();\n\t\tImGui::PopStyleColor();\n\n\t\tline_count++;\n\t\tif (line_count == 5)\n\t\t\tline_count = 0;\n\t\telse\n\t\t\tImGui::SameLine();\n\t}\n\n\tImGui::TreePop();\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\treturn material_changed;\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiObjectsWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_OBJECTS_WINDOW_H\n#define IMGUI_OBJECTS_WINDOW_H\n\n#include \"Renderer/GPURenderer.h\"\n\nclass RenderWindow;\n\nclass ImGuiObjectsWindow\n{\npublic:\n\tstatic const char* TITLE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid draw();\n\tvoid draw_global_objects_panel();\n\tvoid draw_objects_panel();\n\tstd::unordered_set<int> filter_displayed_materials(int material_count, const std::vector<std::string>& material_names, const std::vector<std::string>& mesh_names, const std::string& filter_string) const;\n\tbool draw_material_presets(CPUMaterial& material);\n\nprivate:\n\tRenderWindow* m_render_window = nullptr;\n\n\tstd::shared_ptr<GPURenderer> m_renderer;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiRenderWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/ImGui/ImGuiRenderWindow.h\"\n#include \"UI/RenderWindow.h\"\n\nconst char* ImGuiRenderWindow::TITLE = \"Viewport\";\n\nvoid ImGuiRenderWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n}\n\nvoid ImGuiRenderWindow::draw()\n{\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));\n\n\tImGui::Begin(ImGuiRenderWindow::TITLE, nullptr);\n\n\t// GetWindowContentRegion() to get the size without the title bar and other decorations.\n\tImVec2 window_size = ImGui::GetContentRegionAvail();\n\n\tif (window_size.x != m_current_size.x || window_size.y != m_current_size.y)\n\t\tm_render_window->resize(window_size.x, window_size.y);\n\n\tImGui::Image((void*)(intptr_t)m_render_window->get_display_view_system()->m_fbo_texture, window_size, ImVec2(0, 1), ImVec2(1, 0));\n\n\tm_current_size = window_size;\n\tm_is_hovered = ImGui::IsWindowHovered();\n\n\tImGui::PopStyleVar(3);\n\tImGui::End();\n}\n\nbool ImGuiRenderWindow::is_hovered() const\n{\n\treturn m_is_hovered;\n}\n\nImVec2 ImGuiRenderWindow::get_size() const\n{\n\treturn m_current_size;\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiRenderWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_RENDER_WINDOW_H\n#define IMGUI_RENDER_WINDOW_H\n\n#include \"imgui.h\"\n\nclass RenderWindow;\n\nclass ImGuiRenderWindow\n{\npublic:\n\tstatic const char* TITLE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid draw();\n\n\tbool is_hovered() const;\n\tImVec2 get_size() const;\n\nprivate:\n\tRenderWindow* m_render_window;\n\n\tImVec2 m_current_size;\n\tbool m_is_hovered = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiRenderer.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/ImGui/ImGuiRenderer.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"imgui_internal.h\"\n\n#include <chrono>\n#include <unordered_map>\n\nImGuiRenderer::ImGuiRenderer()\n{\n\tImGuiViewport* viewport = ImGui::GetMainViewport();\n\tfloat windowDpiScale = viewport->DpiScale;\n\tif (windowDpiScale > 1.0f)\n\t\tImGui::GetStyle().ScaleAllSizes(windowDpiScale);\n}\n\nvoid ImGuiRenderer::init_imgui(GLFWwindow* glfw_window)\n{\n\t// Setting ImGui up\n\tIMGUI_CHECKVERSION();\n\tImGui::CreateContext();\n\tImGuiIO& io = ImGui::GetIO();\n\tio.ConfigFlags |= ImGuiConfigFlags_NavEnableKeyboard;     // Enable Keyboard Controls\n\tio.ConfigFlags |= ImGuiConfigFlags_DockingEnable;\n\n\tImGui_ImplGlfw_InitForOpenGL(glfw_window, true);\n\tImGui_ImplOpenGL3_Init();\n}\n\nvoid ImGuiRenderer::add_tooltip(const std::string& tooltip_text, ImGuiHoveredFlags flags)\n{\n\tif (ImGui::IsItemHovered(flags))\n\t\tImGuiRenderer::wrapping_tooltip(tooltip_text);\n}\n\nvoid ImGuiRenderer::add_warning(const std::string& warning_text)\n{\n\tImGui::TextColored(ImVec4(1.0f, 1.0f, 0.0f, 1.0f), \"Warning: \");\n\tImGuiRenderer::show_help_marker(warning_text.c_str());\n}\n\nbool ImGuiRenderer::ComboWithTooltips(const std::string& combo_text, int* combo_value, const char** items, size_t items_count, const char** tooltips, bool* disabled_items)\n{\n\tif (ImGui::BeginCombo(combo_text.c_str(), items[*combo_value]))\n\t{\n\t\tfor (int i = 0; i < items_count; i++)\n\t\t{\n\t\t\tImGui::BeginDisabled(disabled_items && disabled_items[i]);\n\n\t\t\tconst bool is_selected = (*combo_value == i);\n\n\t\t\tif (ImGui::Selectable(items[i], is_selected))\n\t\t\t{\n\t\t\t\t*combo_value = i;\n\n\t\t\t\tImGui::EndDisabled(); // disabled_items && 
disabled_items[i]\n\t\t\t\tImGui::EndCombo();\n\n\t\t\t\treturn true;\n\t\t\t}\n\n\t\t\tImGui::EndDisabled(); // disabled_items && disabled_items[i]\n\n\t\t\tImGuiRenderer::add_tooltip(tooltips[i]);\n\n\t\t\tif (is_selected)\n\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t}\n\t\tImGui::EndCombo();\n\t}\n\n\treturn false;\n}\n\nvoid ImGuiRenderer::wrapping_tooltip(const std::string& text)\n{\n\tImGui::SetNextWindowSize(ImVec2(ImGui::GetFontSize() * 32.0f, 0.0f));\n\tImGui::BeginTooltip();\n\tImGui::PushTextWrapPos(0.0f);\n\tImGui::Text(\"%s\", text.c_str());\n\tImGui::PopTextWrapPos();\n\tImGui::EndTooltip();\n}\n\nvoid ImGuiRenderer::show_help_marker(const std::string& text, ImVec4 color)\n{\n\tImGui::SameLine();\n\tif (color.x == -1.0f && color.y == -1.0f && color.z == -1.0 && color.w == -1.0f)\n\t\t// Default \"disabled\" color\n\t\tImGui::TextDisabled(\"(?)\");\n\telse\n\t{\n\t\tImGui::PushStyleColor(ImGuiCol_Text, color);\n\t\tImGui::Text(\"(?)\");\n\t\tImGui::PopStyleColor();\n\t}\n\tadd_tooltip(text);\n}\n\nvoid ImGuiRenderer::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n\tm_imgui_settings_window.set_render_window(render_window);\n\tm_imgui_animation_window.set_render_window(render_window);\n\tm_imgui_tools_window.set_render_window(render_window);\n\tm_imgui_objects_window.set_render_window(render_window);\n\tm_imgui_render_window.set_render_window(render_window);\n\tm_imgui_log_window.set_render_window(render_window);\n}\n\nvoid ImGuiRenderer::set_status_text(const std::string& new_status_text)\n{\n\tm_imgui_settings_window.set_status_text(new_status_text);\n}\n\nstd::string ImGuiRenderer::get_status_text() const\n{\n\treturn m_imgui_settings_window.get_status_text();\n}\n\nvoid 
ImGuiRenderer::draw_interface()\n{\n\tImGui_ImplOpenGL3_NewFrame();\n\tImGui_ImplGlfw_NewFrame();\n\tImGui::NewFrame();\n\n\trescale_ui();\n\tdraw_dockspace();\n\tdraw_settings_window();\n\tdraw_animation_window();\n\tdraw_tools_window();\n\tdraw_objects_window();\n\tdraw_log_window();\n\tdraw_render_window();\n\n\tglBindFramebuffer(GL_FRAMEBUFFER, 0);\n\tImGui::Render();\n\tImGui_ImplOpenGL3_RenderDrawData(ImGui::GetDrawData());\n}\n\nvoid ImGuiRenderer::rescale_ui()\n{\n\tImGuiIO& io = ImGui::GetIO();\n\tImGuiViewport* viewport = ImGui::GetMainViewport();\n\n\tio.FontGlobalScale = viewport->DpiScale;\n}\n\nvoid ImGuiRenderer::draw_dockspace()\n{\n\t// We are using the ImGuiWindowFlags_NoDocking flag to make the parent window not dockable into,\n\t// because it would be confusing to have two docking targets within each others.\n\tImGuiWindowFlags window_flags = ImGuiWindowFlags_NoDocking;\n\n\tImGuiViewport* viewport = ImGui::GetMainViewport();\n\tImGui::SetNextWindowPos(viewport->Pos);\n\tImGui::SetNextWindowSize(viewport->Size);\n\tImGui::SetNextWindowViewport(viewport->ID);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));\n\twindow_flags |= ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoDecoration;\n\n\tImGui::Begin(\"HIPRT-Path-Tracer\", nullptr, window_flags);\n\n\t// DockSpace\n\tImGuiIO& io = ImGui::GetIO();\n\tif (io.ConfigFlags & ImGuiConfigFlags_DockingEnable)\n\t{\n\t\tstatic ImGuiDockNodeFlags dockspace_flags = ImGuiDockNodeFlags_PassthruCentralNode;\n\n\t\tImGuiID dockspace_id = ImGui::GetID(\"DockSpace\");\n\t\tImGui::DockSpace(dockspace_id, ImVec2(0.0f, 0.0f), dockspace_flags);\n\n\t\tstatic auto first_time = true;\n\t\tif (first_time)\n\t\t{\n\t\t\tfirst_time = false;\n\n\t\t\tImGui::DockBuilderRemoveNode(dockspace_id); // clear any previous 
layout\n\t\t\tImGui::DockBuilderAddNode(dockspace_id, dockspace_flags | ImGuiDockNodeFlags_DockSpace);\n\t\t\tImGui::DockBuilderSetNodeSize(dockspace_id, viewport->Size);\n\n\t\t\tint renderer_width = m_render_window->get_renderer()->m_render_resolution.x;\n\t\t\tint renderer_height = m_render_window->get_renderer()->m_render_resolution.y;\n\t\t\tm_dock_id_left = ImGui::DockBuilderSplitNode(dockspace_id, ImGuiDir_Left, ImGuiSettingsWindow::BASE_SIZE / (renderer_width + ImGuiSettingsWindow::BASE_SIZE), nullptr, &dockspace_id);\n\t\t\tm_dock_id_bottom = ImGui::DockBuilderSplitNode(dockspace_id, ImGuiDir_Down, ImGuiLogWindow::BASE_SIZE / (renderer_height + ImGuiLogWindow::BASE_SIZE), nullptr, &dockspace_id);\n\n\t\t\t// we now dock our windows into the docking node we made above\n\t\t\tImGui::DockBuilderDockWindow(ImGuiLogWindow::TITLE, m_dock_id_bottom);\n\t\t\tImGui::DockBuilderDockWindow(ImGuiSettingsWindow::TITLE, m_dock_id_left);\n\t\t\tImGui::DockBuilderDockWindow(ImGuiRenderWindow::TITLE, dockspace_id);\n\t\t\tImGui::DockBuilderFinish(dockspace_id);\n\t\t}\n\t}\n\n\tImGui::PopStyleVar(3);\n\tImGui::End();\n}\n\nvoid ImGuiRenderer::draw_settings_window()\n{\n\tm_imgui_settings_window.draw();\n}\n\nvoid ImGuiRenderer::draw_animation_window()\n{\n\t// \"Tabbing\" / \"docking\" / \"putting\" the window into the left part of the dock\n\t// (basically, this window will act as a tab of the \"Settings\" window\n\tImGui::SetNextWindowDockID(m_dock_id_left, ImGuiCond_Always);\n\n\tm_imgui_animation_window.draw();\n}\n\nvoid ImGuiRenderer::draw_tools_window()\n{\n\t// \"Tabbing\" / \"docking\" / \"putting\" the window into the left part of the dock\n\t// (basically, this window will act as a tab of the \"Settings\" window\n\tImGui::SetNextWindowDockID(m_dock_id_left, ImGuiCond_Always);\n\n\tm_imgui_tools_window.draw();\n}\n\nvoid ImGuiRenderer::draw_objects_window()\n{\n\t// \"Tabbing\" / \"docking\" / \"putting\" the window into the left part of the dock\n\t// 
(basically, this window will act as a tab of the \"Settings\" window\n\tImGui::SetNextWindowDockID(m_dock_id_left, ImGuiCond_Always);\n\n\tm_imgui_objects_window.draw();\n}\n\nvoid ImGuiRenderer::draw_render_window()\n{\n\tm_imgui_render_window.draw();\n}\n\nvoid ImGuiRenderer::draw_log_window()\n{\n\tm_imgui_log_window.draw();\n}\n\nImGuiRenderWindow& ImGuiRenderer::get_imgui_render_window()\n{\n\treturn m_imgui_render_window;\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiRenderer.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_RENDERER_H\n#define IMGUI_RENDERER_H\n\n#include \"Renderer/OpenImageDenoiser.h\"\n#include \"UI/ApplicationSettings.h\"\n#include \"UI/ImGui/ImGuiAnimationWindow.h\"\n#include \"UI/ImGui/ImGuiToolsWindow.h\"\n#include \"UI/ImGui/ImGuiLogWindow.h\"\n#include \"UI/ImGui/ImGuiObjectsWindow.h\"\n#include \"UI/ImGui/ImGuiRenderWindow.h\"\n#include \"UI/ImGui/ImGuiRendererPerformancePreset.h\"\n#include \"UI/ImGui/ImGuiSettingsWindow.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n\n#include \"imgui.h\"\n#include \"imgui_impl_glfw.h\"\n#include \"imgui_impl_opengl3.h\"\n\n#include <memory>\n\nclass GPURenderer;\nclass RenderWindow;\n\nclass ImGuiRenderer\n{\npublic:\n\tImGuiRenderer();\n\tstatic void init_imgui(GLFWwindow* glfw_window);\n\n\t/**\n  \t * Adds a tooltip to the last widget that auto wraps after 80 characters\n\t */\n\tstatic void wrapping_tooltip(const std::string& text);\n\tstatic void show_help_marker(const std::string& text, ImVec4 color = ImVec4(-1.0f, -1.0f, -1.0f, -1.0f));\n\tstatic void add_tooltip(const std::string& tooltip_text, ImGuiHoveredFlags flags = ImGuiHoveredFlags_AllowWhenDisabled);\n\tstatic void add_warning(const std::string& warning_text);\n\n\tstatic bool ComboWithTooltips(const std::string& combo_text, int* combo_value, const char** items, size_t items_count, const char** tooltips, bool* disabled_items = nullptr);\n\n\tvoid set_render_window(RenderWindow* renderer);\n\tvoid set_status_text(const std::string& new_status_text);\n\tstd::string get_status_text() const;\n\n\tvoid draw_interface();\n\tvoid rescale_ui();\n\tvoid draw_dockspace();\n\tvoid draw_settings_window();\n\tvoid draw_animation_window();\n\tvoid draw_tools_window();\n\tvoid draw_objects_window();\n\tvoid draw_render_window();\n\tvoid draw_log_window();\n\n\tImGuiRenderWindow& 
get_imgui_render_window();\n\nprivate:\n\tImGuiID m_dock_id_left;\n\tImGuiID m_dock_id_bottom;\n\n\tImGuiSettingsWindow m_imgui_settings_window;\n\tImGuiAnimationWindow m_imgui_animation_window;\n\tImGuiToolsWindow m_imgui_tools_window;\n\tImGuiObjectsWindow m_imgui_objects_window;\n\tImGuiRenderWindow m_imgui_render_window;\n\tImGuiLogWindow m_imgui_log_window;\n\n\tRenderWindow* m_render_window = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiRendererPerformancePreset.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n\n#ifndef IMGUI_RENDERER_PERFORMANCE_PRESET_H\n#define IMGUI_RENDERER_PERFORMANCE_PRESET_H\n\nenum ImGuiRendererSettingsPreset\n{\n    SETTINGS_PRESET_DEFAULT,\n    SETTINGS_PRESET_REFERENCE_PATH_TRACER,\n    SETTINGS_PRESET_MIS_NEE_PATH_TRACER,\n    SETTINGS_PRESET_RIS_NEE_PATH_TRACER,\n    SETTINGS_PRESET_RESTIR_DI_FAST,\n    SETTINGS_PRESET_RESTIR_DI_EFFICIENCY,\n    SETTINGS_PRESET_RESTIR_GI,\n    SETTINGS_PRESET_RESTIR_DI_GI\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiSettingsWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompiler.h\"\n#include \"Device/includes/BSDFs/MicrofacetRegularization.h\"\n#include \"HostDeviceCommon/RenderSettings.h\"\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/RenderPasses/FillGBufferRenderPass.h\"\n#include \"Renderer/RenderPasses/MegaKernelRenderPass.h\"\n#include \"Scene/CameraAnimation.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/ImGui/ImGuiRenderer.h\"\n#include \"UI/ImGui/ImGuiSettingsWindow.h\"\n#include \"UI/RenderWindow.h\"\n\n#include <iostream>\n\nextern GPUKernelCompiler g_gpu_kernel_compiler;\n\nconst char* ImGuiSettingsWindow::TITLE = \"Render settings\";\nconst float ImGuiSettingsWindow::BASE_SIZE = 630.0f;\n\nvoid ImGuiSettingsWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n\n\tm_application_settings = render_window->get_application_settings();\n\tm_renderer = render_window->get_renderer();\n\tm_render_window_denoiser = render_window->get_denoiser();\n\tm_render_window_perf_metrics = m_render_window->get_performance_metrics();\n}\n\nvoid ImGuiSettingsWindow::set_status_text(const std::string& new_status_text)\n{\n\tm_status_text = new_status_text;\n}\n\nstd::string ImGuiSettingsWindow::get_status_text() const\n{\n\treturn m_status_text;\n}\n\nvoid ImGuiSettingsWindow::draw()\n{\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);\n\tImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(10.0f, 0.0f));\n\tImGui::Begin(ImGuiSettingsWindow::TITLE, nullptr, ImGuiWindowFlags_NoDecoration);\n\n\tdraw_header();\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::SeparatorText(\"General render 
settings\");\n\tdraw_render_settings_panel();\n\tdraw_render_stopping_conditions_panel();\n\tdraw_camera_panel();\n\tdraw_environment_panel();\n\tdraw_sampling_panel();\n\tdraw_denoiser_panel();\n\tdraw_post_process_panel();\n\tdraw_quality_panel();\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::SeparatorText(\"Other settings\");\n\tdraw_performance_settings_panel();\n\tdraw_performance_metrics_panel();\n\tdraw_shader_kernels_panel();\n\tdraw_debug_panel();\n\n\t// For a little bit of space at the very bottom of the window\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tm_current_size = ImGui::GetWindowSize();\n\n\tImGui::PopStyleVar(3);\n\tImGui::End();\n}\n\nvoid ImGuiSettingsWindow::draw_header()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tImGui::Text(\"Status: %s\", m_status_text.c_str());\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tif (render_settings.accumulate)\n\t\tImGui::Text(\"Render time: %.3fs\", m_render_window->get_current_render_time() / 1000.0f);\n\telse\n\t\tImGui::Text(\"Frame time (GPU): %.3fms\", m_render_window_perf_metrics->get_current_value(GPURenderer::ALL_RENDER_PASSES_TIME_KEY));\n\tImGui::Text(\"%d samples | %.2f samples/s @ %dx%d\", render_settings.sample_number, m_render_window->get_samples_per_second(), m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y);\n\tfloat time_before_viewport_refresh_ms = m_render_window->get_time_ms_before_viewport_refresh();\n\tif (!m_render_window->is_rendering_done() && render_settings.accumulate)\n\t{\n\t\t// Only displaying the refresh timer if we actually need to wait before refreshin'\n\t\t// And also, not displaying that if the rendering is done\n\n\t\tfloat time_before_refresh_seconds = time_before_viewport_refresh_ms / 1000.0f;\n\t\tif (time_before_refresh_seconds > 0.0f)\n\t\t\tImGui::Text(\"Viewport refresh in: %.3fs\", time_before_refresh_seconds);\n\t\telse\n\t\t{\n\t\t\t// Time is < 0.0f i.e. 
the timer has expired and we're waiting for a refresh\n\t\t\tif (m_renderer->get_gmon_render_pass()->is_render_pass_used() && m_renderer->get_gmon_render_pass()->recomputation_requested())\n\t\t\t\t// If we're waiting for GMoN, indicating it\n\t\t\t\tImGui::Text(\"Viewport refresh in: 0.000s --- Waiting for GMoN\");\n\t\t\telse\n\t\t\t\t// If we're not waiting for GMoN, just clampign so that we don't display negative values\n\t\t\t\tImGui::Text(\"Viewport refresh in: %.3fs\", std::max(0.0f, time_before_refresh_seconds));\n\t\t}\n\t}\n\telse if (render_settings.accumulate)\n\t\t// If the rendering is done, displaying 0.000s\n\t\tImGui::Text(\"Viewport refresh in: 0.000s\");\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tif (render_settings.has_access_to_adaptive_sampling_buffers())\n\t{\n\t\tunsigned int converged_count = m_renderer->get_status_buffer_values().pixel_converged_count;\n\t\tunsigned int total_pixel_count = m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y;\n\n\t\tbool can_print_convergence = false;\n\t\tcan_print_convergence |= render_settings.sample_number > render_settings.adaptive_sampling_min_samples;\n\t\tcan_print_convergence |= render_settings.stop_pixel_noise_threshold > 0.0f;\n\n\t\tif (can_print_convergence)\n\t\t{\n\t\t\tImGui::Text(\"Pixels converged: %d / %d - %.4f%%\", converged_count, total_pixel_count, static_cast<float>(converged_count) / total_pixel_count * 100.0f);\n\n\t\t\t// Adding some information on what noise threshold is being used\n\t\t\tstd::string text = \"Current noise threshold is: \";\n\t\t\tif (render_settings.enable_adaptive_sampling && render_settings.sample_number > render_settings.adaptive_sampling_min_samples)\n\t\t\t{\n\t\t\t\tif (render_settings.stop_pixel_noise_threshold > render_settings.adaptive_sampling_noise_threshold)\n\t\t\t\t\t// If the pixel noise threshold is stronger, then the displayed convergence counter\n\t\t\t\t\t// is going to be according to the stop noise threshold so that's what 
we're adding in the tooltip\n\t\t\t\t\t// there\n\t\t\t\t\ttext += std::to_string(render_settings.stop_pixel_noise_threshold) + \" (pixel noise threshold)\";\n\t\t\t\telse\n\t\t\t\t\ttext += std::to_string(render_settings.adaptive_sampling_noise_threshold) + \" (adaptive sampling)\";\n\t\t\t}\n\t\t\telse if (render_settings.stop_pixel_noise_threshold > 0.0f)\n\t\t\t\ttext += std::to_string(render_settings.stop_pixel_noise_threshold) + \" (pixel noise threshold)\";\n\t\t\tImGuiRenderer::show_help_marker(text);\n\t\t}\n\t\telse\n\t\t{\n\t\t\tif (render_settings.accumulate)\n\t\t\t\t// No need to show the text if we're not accumulating\n\t\t\t{\n\t\t\t\tImGui::Text(\"Pixels converged: N/A\");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Adaptive sampling hasn't kicked in yet... Convergence computation hasn't started.\");\n\t\t\t}\n\t\t}\n\t}\n\telse\n\t{\n\t\tImGui::Text(\"Pixels converged: N/A\");\n\t\tImGuiRenderer::show_help_marker(\"Convergence is only computed when either adaptive sampling or the \\\"Pixel noise threshold\\\" render stopping condition is used.\");\n\t}\n\n\tif (ImGui::Button(\"Save viewport to PNG\"))\n\t\tm_render_window->get_screenshoter()->write_to_png();\n\tif (ImGui::Button(\"Copy viewport to clipboard\"))\n\t\tUtils::copy_image_to_clipboard(m_render_window->get_screenshoter()->get_image());\n\n\tImGui::Separator();\n\n\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n}\n\nvoid ImGuiSettingsWindow::draw_render_settings_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\t// ImGui::PopItemWidth();\n\tif (!ImGui::CollapsingHeader(\"Render Settings\"))\n\t\treturn;\n\tImGui::TreePush(\"Render settings tree\");\n\n\tstatic int preset_selected = 0;\n\tstd::vector<const char*> preset_items = { \n\t\t\"Default\",\n\t\t\"Reference path-tracer\",\n\t\t\"MIS NEE Path Tracer\",\n\t\t\"RIS NEE Path Tracer\",\n\t\t\"ReSTIR DI Fast\",\n\t\t\"ReSTIR DI Efficiency\",\n\t\t\"ReSTIR GI\",\n\t\t\"ReSTIR DI + 
GI\"\n\t};\n\tstd::vector<const char*> tooltips = { \n\t\t\"No preset\",\n\t\t\"Reference, no NEE, brute-force path-tracer\",\n\t\t\"NEE with MIS (BSDF + Light sampling) at each vertex of the path\",\n\t\t\"NEE with RIS (N*BSDF + M*Light sampling) at each vertex of the path\",\n\t\t\"Direct lighting only (0 bounce) and ReSTIR DI. Fast settings for better framerates but converges slower than \\\"ReSTIR DI Efficiency\\\"\",\n\t\t\"Direct lighting only (0 bounce) and ReSTIR DI. Heavy settings for the fastest convergence rate\",\n\t\t\"5 bounces with ReSTIR GI and RIS at each vertex of the path\",\n\t\t\"\\\"ReSTIR DI Fast\\\" for the direct lighting + 5 bounces with ReSTIR GI and RIS at each vertex of the path\",\n\t};\n\n\tImGui::SeparatorText(\"Global settings presets\");\n\tif (ImGuiRenderer::ComboWithTooltips(\"Rendering preset\", &preset_selected, preset_items.data(), preset_items.size(), tooltips.data()))\n\t\tapply_performance_preset(static_cast<ImGuiRendererSettingsPreset>(preset_selected));\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::SeparatorText(\"Viewport Settings\");\n\tdisplay_view_selector();\n\n\tstatic float resolution_scaling_current_widget_value = m_application_settings->render_resolution_scale;\n\tImGui::BeginDisabled(m_application_settings->keep_same_resolution);\n\tImGui::InputFloat(\"Resolution scale\", &resolution_scaling_current_widget_value);\n\n\tif (resolution_scaling_current_widget_value != m_application_settings->render_resolution_scale)\n\t{\n\t\tImGui::TreePush(\"Resolution scaling apply button tree\");\n\n\t\tif (ImGui::Button(\"Apply\"))\n\t\t{\n\t\t\tif (resolution_scaling_current_widget_value <= 0.0f)\n\t\t\t\t// Wrong resolution scaling factor, restoring to previous value\n\t\t\t\tresolution_scaling_current_widget_value = m_application_settings->render_resolution_scale;\n\t\t\telse\n\t\t\t{\n\t\t\t\t// Valid scaling factor\n\t\t\t\tm_application_settings->render_resolution_scale = 
resolution_scaling_current_widget_value;\n\t\t\t\tm_render_window->change_resolution_scaling(resolution_scaling_current_widget_value);\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n\tImGui::EndDisabled();\n\n\tif (ImGui::Checkbox(\"Keep same render resolution\", &m_application_settings->keep_same_resolution))\n\t{\n\t\tif (m_application_settings->keep_same_resolution)\n\t\t{\n\t\t\t// Remembering the width and height we need to target\n\t\t\tm_application_settings->target_width = m_renderer->m_render_resolution.x;\n\t\t\tm_application_settings->target_height = m_renderer->m_render_resolution.y;\n\t\t}\n\t}\n\tImGuiRenderer::show_help_marker(\"Keeps approximately the same render resolution when \"\n\t\t\t\t\t\t\t\t\t\"resizing the application's window.\");\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::SeparatorText(\"General settings\");\n\n\tif (ImGui::Checkbox(\"Accumulate\", &render_settings.accumulate))\n\t{\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tif (!render_settings.accumulate)\n\t\t{\n\t\t\tm_render_window->get_application_settings()->auto_sample_per_frame = false;\n\t\t\trender_settings.samples_per_frame = 1;\n\t\t}\n\t}\n\n\tif (ImGui::InputInt(\"Samples per frame\", &render_settings.samples_per_frame))\n\t{\n\t\t// Clamping to 1\n\t\trender_settings.samples_per_frame = std::max(1, render_settings.samples_per_frame);\n\t\t// If the user manually changed to number of samples per frame, let's disable auto sample per frame\n\t\t// because the user probably doesn't want it\n\t\tm_application_settings->auto_sample_per_frame = false;\n\t}\n\n\tImGui::SameLine();\n\tImGui::Checkbox(\"Auto\", &m_application_settings->auto_sample_per_frame);\n\tif (m_application_settings->auto_sample_per_frame)\n\t{\n\t\tImGui::TreePush(\"Target GPU framerate tree\");\n\t\tif (ImGui::InputFloat(\"Target GPU framerate\", &m_application_settings->target_GPU_framerate))\n\t\t\t// Clamping to 1 FPS because going below 
that is dangerous in terms of driver timeouts\n\t\t\tm_application_settings->target_GPU_framerate = std::max(1.0f, m_application_settings->target_GPU_framerate);\n\t\tImGuiRenderer::show_help_marker(\"The samples per frame will be automatically adjusted such that the GPU\"\n\t\t\t\" takes approximately 1000.0f / TargetFramerate milliseconds to complete\"\n\t\t\t\" a frame. Useful to keep the GPU busy after almost all pixels have converged.\"\n\t\t\t\" Lowering this settings increases rendering efficiency but can cause camera\"\n\t\t\t\" movements to be stuttery.\");\n\n\t\tImGui::TreePop();\n\t}\n\n\tint nb_bounce_before_change = render_settings.nb_bounces;\n\tif (ImGui::InputInt(\"Max bounces\", &render_settings.nb_bounces))\n\t{\n\t\t// Clamping to 0 in case the user input a negative number of bounces\t\n\t\trender_settings.nb_bounces = std::max(render_settings.nb_bounces, 0);\n\n\t\tif (render_settings.alpha_testing_indirect_bounce >= nb_bounce_before_change + 1)\n\t\t\t// Auto adjusting the alpha testing indirect bounce limit such that, if the alpha test limit\n\t\t\t// was above the maximum number of bounces before (i.e. 
alpha test are always enabled) we changed\n\t\t\t// the number of bounces, then we want the limit to stay above the maximum (such that alpha tests are\n\t\t\t// still always enabled)\n\t\t\t//\n\t\t\t// This is only for the convenience of the user so that they don't have the go change\n\t\t\t// the alpha test bounce limit after they change the number of bounces: the alpha test limit changes automatically\n\t\t\t// (if the alpha test limit was at the maximum)\n\t\t\trender_settings.alpha_testing_indirect_bounce = render_settings.nb_bounces + 1;\n\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tdraw_russian_roulette_options();\n\n\tImGui::TreePop();\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n}\n\nvoid ImGuiSettingsWindow::draw_render_stopping_conditions_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tif (ImGui::CollapsingHeader(\"Render stopping condition\"))\n\t{\n\t\tImGui::TreePush(\"Stopping condition tree\");\n\t\t{\n\t\t\tif (ImGui::InputInt(\"Max sample count\", &m_application_settings->max_sample_count))\n\t\t\t\tm_application_settings->max_sample_count = std::max(m_application_settings->max_sample_count, 0);\n\t\t\tif (m_renderer->get_gmon_render_pass()->is_render_pass_used())\n\t\t\t{\n\t\t\t\t// Using GMoN\n\n\t\t\t\tunsigned int number_of_sets = m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT);\n\t\t\t\tif (m_application_settings->max_sample_count % number_of_sets != 0)\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Number of samples not divisible GMoN tree\");\n\n\t\t\t\t\t// But the maximum number of samples isn't divisible by the number of sets\n\t\t\t\t\tstd::string warning_text = \"Currently using GMoN (\\\"Post-processing\\\" panel) but the number of \"\n\t\t\t\t\t\t\"maximum samples entered here isn't divisible by the number of GMoN sets. 
This means that \"\n\t\t\t\t\t\t\"what's displayed in the viewport will only be \"\n\t\t\t\t\t\t+ std::to_string(std::max(1u, m_application_settings->max_sample_count / number_of_sets)) + \" samples instead of \"\n\t\t\t\t\t\t+ std::to_string(m_application_settings->max_sample_count) + \".\\n\\n\"\n\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\"You click the button to the right to round up the maximum number of samples to one that is \"\n\t\t\t\t\t\t\"divisible by the number of GMoN sets (\"\n\t\t\t\t\t\t+ std::to_string(m_renderer->get_global_compiler_options()->get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT)) + \")\";\n\t\t\t\t\tImGuiRenderer::add_warning(warning_text);\n\n\t\t\t\t\tImGui::SameLine();\n\t\t\t\t\tif (ImGui::Button(\"Round up\"))\n\t\t\t\t\t\tm_application_settings->max_sample_count = std::ceil(m_application_settings->max_sample_count / static_cast<float>(number_of_sets)) * number_of_sets;\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (ImGui::InputFloat(\"Max render time (s)\", &m_application_settings->max_render_time))\n\t\t\t\tm_application_settings->max_render_time = std::max(m_application_settings->max_render_time, 0.0f);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tif (!render_settings.accumulate)\n\t\t\t{\n\t\t\t\t// Adding a shortcut button to re-enable accumulation\n\t\t\t\tif (ImGui::Button(\"Enable accumulation\"))\n\t\t\t\t{\n\t\t\t\t\trender_settings.accumulate = true;\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t}\n\t\t\tImGui::BeginDisabled(!render_settings.accumulate); // Cannot use stopping condition if not accumulating\n\t\t\tImGui::SeparatorText(\"Pixel Stop Noise Threshold\");\n\t\t\tImGui::Checkbox(\"Use pixel noise threshold stopping condition\", &render_settings.use_pixel_stop_noise_threshold);\n\t\t\tImGuiRenderer::show_help_marker(\"If enabled, stops the renderer after a certain proportion \"\n\t\t\t\t\"of pixels of the image have converged. 
\\\"converged\\\" is evaluated according to the \"\n\t\t\t\t\"threshold of the adaptive sampling if it is enabled. If adaptive sampling is not \"\n\t\t\t\t\"enabled, \\\"converged\\\" is defined by the \\\"Pixel noise threshold\\\" variance \"\n\t\t\t\t\"threshold below.\");\n\n\t\t\tImGui::BeginDisabled(!render_settings.use_pixel_stop_noise_threshold);\n\t\t\t{\n\t\t\t\tif (ImGui::InputFloat(\"Pixel proportion\", &render_settings.stop_pixel_percentage_converged))\n\t\t\t\t\trender_settings.stop_pixel_percentage_converged = std::max(0.0f, std::min(render_settings.stop_pixel_percentage_converged, 100.0f));\n\t\t\t\tImGuiRenderer::show_help_marker(\"The proportion of pixels that need to have converge \"\n\t\t\t\t\t\"to the noise threshold for the rendering to stop. In percentage [0, 100].\");\n\t\t\t}\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::BeginDisabled(render_settings.enable_adaptive_sampling || !render_settings.use_pixel_stop_noise_threshold);\n\t\t\t{\n\t\t\t\t// Only letting the user manipulate the stop pixel noise threshold if adaptive sampling is not enabled\n\t\t\t\t// because if adaptive sampling is enabled, then the stop pixel noise threshold feature can only\n\t\t\t\t// be used to give a render stopping condition (after a certain proportion of pixels have converged).\n\t\t\t\t//\n\t\t\t\t// Said otherwise, if adaptive sampling is enabled, then we're not using the stop pixel noise threshold\n\t\t\t\t// at all so it doesn't need to be exposed to the user\n\t\t\t\tif (ImGui::InputFloat(\"Pixel noise threshold\", &render_settings.stop_pixel_noise_threshold))\n\t\t\t\t{\n\t\t\t\t\trender_settings.stop_pixel_noise_threshold = std::max(0.0f, render_settings.stop_pixel_noise_threshold);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tstd::string pixel_noise_threshold_help_string = \"Cannot be set lower than the adaptive sampling threshold. 
0.0 to disable.\";\n\t\t\t\tif (render_settings.enable_adaptive_sampling)\n\t\t\t\t\tpixel_noise_threshold_help_string += \"\\n\\nDisabled because adaptive sampling is enabled. Both cannot be used at the same time.\";\n\t\t\t\tImGuiRenderer::show_help_marker(pixel_noise_threshold_help_string);\n\n\t\t\t\tImGui::InputInt(\"Minimum sample count\", &m_application_settings->pixel_stop_noise_threshold_min_sample_count);\n\t\t\t\tImGuiRenderer::show_help_marker(\"How many samples to render before evaluating the number of pixels that have reached \"\n\t\t\t\t\t\"the noise threshold.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"This setting only applies to the \\\"pixel stop noise threshold\\\" feature.\\n\"\n\t\t\t\t\t\"It does not apply to adaptive sampling.\\n\"\n\t\t\t\t\t\"Adaptive sampling has its own minimum sample count.\");\n\n\t\t\t\tif (ImGui::Button(\"Copy adaptive sampling's threshold\"))\n\t\t\t\t{\n\t\t\t\t\trender_settings.stop_pixel_noise_threshold = render_settings.adaptive_sampling_noise_threshold;\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tstd::string copy_button_help_string = \"Copies the adaptive sampling variance threshold for the stop pixel noise threshold.\";\n\t\t\t\tif (render_settings.enable_adaptive_sampling)\n\t\t\t\t\tcopy_button_help_string += \"\\n\\nDisabled because adaptive sampling is enabled. 
Both cannot be used at the same time.\";\n\t\t\t\tImGuiRenderer::show_help_marker(copy_button_help_string);\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t}\n\t\t\tImGui::EndDisabled(); // render_settings.enable_adaptive_sampling\n\t\t\tImGui::EndDisabled(); // !render_settings.accumulate\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t}\n\t\t// Stopping condition tree\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_russian_roulette_options()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tif (ImGui::Checkbox(\"Do Russian Roulette\", &render_settings.do_russian_roulette))\n\t\tm_render_window->set_render_dirty(true);\n\n\tconst char* items[] = { \"- Max throughput\", \"- Arnold, Langlands, 2014\" };\n\tif (ImGui::Combo(\"Termination method\", (int*)&render_settings.path_russian_roulette_method, items, IM_ARRAYSIZE(items)))\n\t\tm_render_window->set_render_dirty(true);\n\n\tstatic bool min_depth_modified = false;\n\tif (!min_depth_modified)\n\t\trender_settings.russian_roulette_min_depth = std::min(5, render_settings.nb_bounces / 2);\n\tif (ImGui::SliderInt(\"RR min depth\", &render_settings.russian_roulette_min_depth, 0, render_settings.nb_bounces + 1))\n\t{\n\t\tm_render_window->set_render_dirty(true);\n\t\tmin_depth_modified = true;\n\t}\n\tImGuiRenderer::show_help_marker(\"After how many bounces can russian roulette kick in? \"\n\t\t\t\t\t\t\t\t\t\"For example, 0 means that the camera ray hits, and then the next bounce \"\n\t\t\t\t\t\t\t\t\t\"is already susceptible to russian roulette kill. 
1 would mean that the first \"\n\t\t\t\t\t\t\t\t\t\"bounce is never going to be cutoff by the russian roulette.\");\n\tif (ImGui::SliderFloat(\"RR throughput clamp\", &render_settings.russian_roulette_throughput_clamp, 1.0f, 20.0f))\n\t\tm_render_window->set_render_dirty(true);\n\tImGuiRenderer::show_help_marker(\"After applying russian roulette (dividing by the continuation probability) \"\n\t\t\t\t\t\t\t\t\t\"the energy added to the ray throughput is clamped to this maximum value.\\n\"\n\t\t\t\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\t\t\t\"This is biased and darkens the image the lower the threshold but it helps \"\n\t\t\t\t\t\t\t\t\t\"reduce variance and fireflies introduced by the russian roulette --> faster \"\n\t\t\t\t\t\t\t\t\t\"convergence.\\n\"\n\t\t\t\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\t\t\t\"0 for no clamping.\");\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::PushStyleColor(ImGuiCol_Button, ImVec4(0.7f, 0.0f, 0.0f, 1.0f));        // Red\n\tImGui::PushStyleColor(ImGuiCol_ButtonHovered, ImVec4(1.0f, 0.2f, 0.2f, 1.0f)); // Lighter red when hovered\n\tImGui::PushStyleColor(ImGuiCol_ButtonActive, ImVec4(0.5f, 0.0f, 0.0f, 1.0f));  // Darker red when clicked\n\tif (ImGui::Button(\"Reset render\"))\n\t\tm_render_window->set_render_dirty(true);\n\tImGui::PopStyleColor(3);\n}\n\nvoid ImGuiSettingsWindow::display_view_selector()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tstd::shared_ptr<DisplayViewSystem> display_view_system = m_render_window->get_display_view_system();\n\n\tstatic std::vector<std::pair<const char*, DisplayViewType>> display_string_to_type = {\n\t\t{ \"- Default\", DisplayViewType::DEFAULT },\n\t\t{ \"- GMoN blend\", DisplayViewType::GMON_BLEND },\n\t\t{ \"- Denoiser blend\", DisplayViewType::DENOISED_BLEND },\n\t\t{ \"- Denoiser - Normals\", DisplayViewType::DISPLAY_DENOISER_NORMALS },\n\t\t{ \"- Denoiser - Albedo\", DisplayViewType::DISPLAY_DENOISER_ALBEDO },\n\t\t{ \"- Pixel convergence heatmap\", 
DisplayViewType::PIXEL_CONVERGENCE_HEATMAP },\n\t\t{ \"- Converged pixels map\", DisplayViewType::PIXEL_CONVERGED_MAP },\n\t\t{ \"- White Furnace Threshold\", DisplayViewType::WHITE_FURNACE_THRESHOLD }\n\t};\n\n\tstd::vector<const char*> items;\n\tfor (auto view_string_to_DisplayViewType : display_string_to_type)\n\t\titems.push_back(view_string_to_DisplayViewType.first);\n\n\tint display_view_selected_index = display_view_system->get_current_display_view_type();\n\n\tif (ImGui::BeginCombo(\"Display view\", items[display_view_selected_index]))\n\t{\n\t\tfor (int i = 0; i < items.size(); i++)\n\t\t{\n\t\t\tconst bool is_selected = (display_view_selected_index == i);\n\t\t\tbool display_view_is_disabled = display_view_disabled(display_string_to_type[i].second);\n\n\t\t\tif (display_view_is_disabled)\n\t\t\t\tImGui::PushStyleColor(ImGuiCol_Text, ImVec4(0.5f, 0.5f, 0.5f, 1.0f));\n\t\t\tif (ImGui::Selectable(items[i], is_selected))\n\t\t\t{\n\t\t\t\tdisplay_view_selected_index = i;\n\n\t\t\t\tif (display_view_is_disabled)\n\t\t\t\t\t// If we clicked on a display that was disabled, there is an\n\t\t\t\t\t// action to do to enable all the necessary for the display view to work\n\t\t\t\t\tdisplay_view_disabled_action(display_string_to_type[i].second);\n\t\t\t\tdisplay_view_system->queue_display_view_change(static_cast<DisplayViewType>(display_view_selected_index));\n\t\t\t\tm_render_window->set_force_viewport_refresh(true);\n\t\t\t}\n\t\t\tif (display_view_is_disabled)\n\t\t\t{\n\t\t\t\tImGui::PopStyleColor();\n\t\t\t\tdisplay_view_tooltip(display_string_to_type[i].second);\n\t\t\t}\n\n\t\t\tif (is_selected)\n\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t}\n\t\tImGui::EndCombo();\n\t}\n\n\tDisplaySettings& display_settings = display_view_system->get_display_settings();\n\tDisplayViewType display_view_type_selected = static_cast<DisplayViewType>(display_view_selected_index);\n\t// Adding some more UI elements for certain display views\n\tswitch 
(display_view_type_selected)\n\t{\n\t\tcase DisplayViewType::WHITE_FURNACE_THRESHOLD:\n\t\t\tbool viewport_update_needed = false;\n\n\t\t\tviewport_update_needed  |= ImGui::Checkbox(\"Use low threshold\", &display_settings.white_furnace_display_use_low_threshold);\n\t\t\tImGuiRenderer::show_help_marker(\"If checked, the white furnace threshold shader will display \"\n\t\t\t\t\t\t\t\t\t\t\t\"pixel that lose energy as green. Pixels will not be highlighted \"\n\t\t\t\t\t\t\t\t\t\t\t\"if unchecked\");\n\t\t\tviewport_update_needed |= ImGui::Checkbox(\"Use high threshold\", &display_settings.white_furnace_display_use_high_threshold);\n\t\t\tImGuiRenderer::show_help_marker(\"If checked, the white furnace threshold shader will display \"\n\t\t\t\t\t\t\t\t\t\t\t\"pixel that gain energy as red. Pixels will not be highlighted \"\n\t\t\t\t\t\t\t\t\t\t\t\"if unchecked\");\n\n\t\t\tif (viewport_update_needed)\n\t\t\t\tm_render_window->set_force_viewport_refresh(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tbreak;\n\t}\n}\n\nbool ImGuiSettingsWindow::display_view_disabled(DisplayViewType display_view_type)\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tswitch (display_view_type)\n\t{\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\t\treturn !render_settings.has_access_to_adaptive_sampling_buffers();\n\n\tcase DisplayViewType::GMON_BLEND:\n\t\treturn !m_renderer->get_gmon_render_pass()->is_render_pass_used();\n\n\tcase DisplayViewType::DENOISED_BLEND:\n\t\treturn !m_application_settings->enable_denoising;\n\n\tdefault:\n\t\tbreak;\n\t}\n\n\treturn false;\n}\n\nvoid ImGuiSettingsWindow::display_view_tooltip(DisplayViewType display_view_type)\n{\n\tswitch (display_view_type)\n\t{\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\t\tImGuiRenderer::add_tooltip(\"This display view is unavailabe because adaptive sampling isn't in use. 
Click to enable adaptive sampling.\");\n\t\treturn;\n\n\tcase DisplayViewType::GMON_BLEND:\n\t\tImGuiRenderer::add_tooltip(\"This display view is disabled because GMoN isn't in use. Click to enable GMoN.\");\n\t\treturn;\n\n\tcase DisplayViewType::DENOISED_BLEND:\n\t\tImGuiRenderer::add_tooltip(\"This display view is disabled because the denoiser isn't enabled. Click to enable the denoiser.\");\n\t\treturn;\n\n\tdefault:\n\t\tbreak;\n\t}\n}\n\nvoid ImGuiSettingsWindow::display_view_disabled_action(DisplayViewType display_view_type)\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tswitch (display_view_type)\n\t{\n\tcase DisplayViewType::PIXEL_CONVERGED_MAP:\n\tcase DisplayViewType::PIXEL_CONVERGENCE_HEATMAP:\n\t\trender_settings.enable_adaptive_sampling = true;\n\t\t\t\n\t\tm_render_window->set_render_dirty(true);\n\n\t\treturn;\n\n\tcase DisplayViewType::GMON_BLEND:\n\t\t// Enabling GMoN\n\t\tm_renderer->get_gmon_render_pass()->get_gmon_data().using_gmon = true;\n\t\ttoggle_gmon();\n\n\t\treturn;\n\n\tcase DisplayViewType::DENOISED_BLEND:\n\t\tImGuiRenderer::add_tooltip(\"This display view is disabled because the denoiser isn't enabled. 
Click to enable the denoiser.\");\n\t\treturn;\n\n\tdefault:\n\t\tbreak;\n\t}\n}\n\nvoid ImGuiSettingsWindow::apply_performance_preset(ImGuiRendererSettingsPreset performance_preset)\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tswitch (performance_preset)\n\t{\n\tcase SETTINGS_PRESET_DEFAULT:\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_REFERENCE_PATH_TRACER:\n\t\trender_settings.do_alpha_testing = true;\n\t\trender_settings.alpha_testing_indirect_bounce = render_settings.nb_bounces + 1;\n\t\trender_settings.direct_contribution_clamp = 0.0f;\n\t\trender_settings.indirect_contribution_clamp = 0.0f;\n\t\trender_settings.envmap_contribution_clamp = 0.0f;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_NO_DIRECT_LIGHT_SAMPLING);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_NO_SAMPLING);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION, KERNEL_OPTION_FALSE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_MIS_NEE_PATH_TRACER:\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_MIS_LIGHT_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, 
KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_RIS_NEE_PATH_TRACER:\n\t\trender_settings.ris_settings.number_of_bsdf_candidates = 1;\n\t\trender_settings.ris_settings.number_of_light_candidates = 4;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RIS_BSDF_AND_LIGHT);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_RESTIR_DI_FAST:\n\t\trender_settings.nb_bounces = 0;\n\n\t\trender_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass = true;\n\t\trender_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location = false;\n\t\trender_settings.restir_di_settings.common_spatial_pass.number_of_passes = 1;\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count = 3;\n\t\t// Reuse radius 1% of the resolution\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * 0.01f;\n\n\t\trender_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_plane_distance_heuristic = 
true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\trender_settings.restir_di_settings.m_cap = 5;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RESTIR_DI);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS, RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_RESTIR_DI_EFFICIENCY:\n\t\trender_settings.nb_bounces = 0;\n\n\t\trender_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass = 
true;\n\t\trender_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location = false;\n\t\trender_settings.restir_di_settings.common_spatial_pass.number_of_passes = 2;\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count = 8;\n\t\t// Reuse radius 1% of the resolution\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * 0.01f;\n\n\t\trender_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\trender_settings.restir_di_settings.m_cap = 3;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RESTIR_DI);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS, RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY, 
KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_BSDF);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_RESTIR_GI:\n\t\trender_settings.nb_bounces = 5;\n\t\trender_settings.ris_settings.number_of_bsdf_candidates = 1;\n\t\trender_settings.ris_settings.number_of_light_candidates = 4;\n\n\t\trender_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass = true;\n\t\trender_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location = false;\n\t\trender_settings.restir_di_settings.common_spatial_pass.number_of_passes = 2;\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_neighbor_count = 8;\n\t\t// Reuse radius 1% of the resolution\n\t\trender_settings.restir_di_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * 0.01f;\n\n\t\trender_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\trender_settings.restir_di_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\trender_settings.restir_di_settings.m_cap = 3;\n\n\t\trender_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass = true;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location = 
false;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.number_of_passes = 2;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.reuse_neighbor_count = 8;\n\t\t// Reuse radius 1% of the resolution\n\t\trender_settings.restir_gi_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * 0.01f;\n\n\t\trender_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\trender_settings.restir_gi_settings.use_jacobian_rejection_heuristic = true;\n\t\trender_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic = true;\n\t\trender_settings.restir_gi_settings.m_cap = 3;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RESTIR_DI);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS, RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY, 
KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS, RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_RESTIR_GI);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tcase SETTINGS_PRESET_RESTIR_DI_GI:\n\t\trender_settings.nb_bounces = 5;\n\t\trender_settings.ris_settings.number_of_bsdf_candidates = 1;\n\t\trender_settings.ris_settings.number_of_light_candidates = 4;\n\n\t\trender_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass = true;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location = false;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.number_of_passes = 2;\n\t\trender_settings.restir_gi_settings.common_spatial_pass.reuse_neighbor_count = 8;\n\t\t// Reuse radius 1% of the resolution\n\t\trender_settings.restir_gi_settings.common_spatial_pass.reuse_radius = hippt::max(m_renderer->m_render_resolution.x, m_renderer->m_render_resolution.y) * 
0.01f;\n\n\t\trender_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass = true;\n\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = true;\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_plane_distance_heuristic = true;\n\t\trender_settings.restir_gi_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\t\trender_settings.restir_gi_settings.use_jacobian_rejection_heuristic = true;\n\t\trender_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic = true;\n\t\trender_settings.restir_gi_settings.m_cap = 3;\n\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY, LSS_RIS_BSDF_AND_LIGHT);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS, RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY, KERNEL_OPTION_TRUE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY, KERNEL_OPTION_FALSE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY, ESS_ALIAS_TABLE);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY, PSS_RESTIR_GI);\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, KERNEL_OPTION_TRUE);\n\t\tm_renderer->recompile_kernels();\n\n\t\tm_render_window->set_render_dirty(true);\n\n\t\tbreak;\n\n\tdefault:\n\t\tbreak;\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_camera_panel()\n{\n\tdraw_camera_panel_static(\"Camera\", m_render_window, m_renderer);\n}\n\nvoid 
ImGuiSettingsWindow::draw_camera_panel_static(const std::string& panel_title, RenderWindow* render_window, std::shared_ptr<GPURenderer> renderer)\n{\n\tHIPRTRenderSettings& render_settings = renderer->get_render_settings();\n\tCamera& camera = renderer->get_camera();\n\n\tif (ImGui::CollapsingHeader(panel_title.c_str()))\n\t{\n\t\tImGui::TreePush(\"Camera tree\");\n\n\t\tImGui::SeparatorText(\"Transformation\");\n\t\tif (ImGui::DragFloat3(\"Position\", reinterpret_cast<float*>(&camera.m_translation)))\n\t\t\trender_window->set_render_dirty(true);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::SeparatorText(\"Settings\");\n\t\tif (ImGui::Checkbox(\"Do ray jittering\", &camera.do_jittering))\n\t\t\trender_window->set_render_dirty(true);\n\n\t\tstatic float camera_fov = camera.vertical_fov * M_INV_PI * 180.0f;\n\t\tif (ImGui::SliderFloat(\"FOV\", &camera_fov, 0.0f, 180.0f, \"%.3fdeg\", ImGuiSliderFlags_AlwaysClamp))\n\t\t{\n\t\t\tcamera.set_FOV_radians(camera_fov / 180.0f * M_PI);\n\n\t\t\trender_window->set_render_dirty(true);\n\t\t}\n\n\t\tif (ImGui::SliderFloat(\"Camera movement speed\", &camera.user_movement_speed_multiplier, 0.0f, 10.0f))\n\t\t\tcamera.user_movement_speed_multiplier = std::max(0.0f, camera.user_movement_speed_multiplier);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::BeginDisabled(!render_settings.accumulate);\n\t\tImGui::Checkbox(\"Render low resolution when interacting\", &render_settings.allow_render_low_resolution);\n\t\tif (!render_settings.accumulate)\n\t\t\tImGuiRenderer::add_tooltip(\"Cannot render at low resolution when not accumulating. If you want to render at \"\n\t\t\t\t\"a lower resolution, you can use the resolution scale in \\\"Render Settings\\\"for that.\");\n\t\tImGui::SliderInt(\"Low resolution scale\", &render_settings.render_low_resolution_scaling, 1, 8);\n\t\tif (!render_settings.accumulate)\n\t\t\tImGuiRenderer::add_tooltip(\"Cannot render at low resolution when not accumulating. 
If you want to render at \"\n\t\t\t\t\"a lower resolution, you can use the resolution scale in \\\"Render Settings\\\"for that.\");\n\t\tImGui::EndDisabled();\n\n\n\n\n\n\t\tstatic int selected_object = 0;\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::Text(\"Center camera on object\");\n\t\tif (ImGui::BeginListBox(\"##center_on_object\", ImVec2(-FLT_MIN, 7 * ImGui::GetTextLineHeightWithSpacing())))\n\t\t{\n\t\t\tconst std::vector<std::string>& mesh_names = renderer->get_mesh_names();\n\t\t\tconst std::vector<std::string>& material_names = renderer->get_material_names();\n\t\t\tfor (int n = 0; n < mesh_names.size(); n++)\n\t\t\t{\n\t\t\t\tconst bool is_selected = (selected_object == n);\n\n\t\t\t\tconst std::string& mesh_name = mesh_names[n];\n\t\t\t\tconst std::string& material_name = material_names[renderer->get_mesh_material_indices()[n]];\n\t\t\t\tstd::string object_text = mesh_name + \" (\" + material_name + \")\";\n\t\t\t\tif (ImGui::Selectable(object_text.c_str(), is_selected))\n\t\t\t\t{\n\t\t\t\t\tselected_object = n;\n\n\t\t\t\t\tfloat3 object_center = renderer->get_mesh_bounding_boxes()[n].get_center();\n\t\t\t\t}\n\n\t\t\t\t// Set the initial focus when opening the combo (scrolling + keyboard navigation focus)\n\t\t\t\tif (is_selected)\n\t\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t\t}\n\t\t\tImGui::EndListBox();\n\t\t}\n\n\t\tif (ImGui::Button(\"Center\"))\n\t\t{\n\t\t\tcamera.look_at_object(renderer->get_mesh_bounding_boxes()[selected_object]);\n\n\t\t\trender_window->set_render_dirty(true);\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_environment_panel()\n{\n\tbool render_made_piggy = false;\n\n\tif (ImGui::CollapsingHeader(\"Environment\"))\n\t{\n\t\tImGui::TreePush(\"Environment tree\");\n\n\t\tbool has_envmap = m_renderer->has_envmap();\n\t\trender_made_piggy |= ImGui::RadioButton(\"None\", ((int*)&m_renderer->get_world_settings().ambient_light_type), 0); 
ImGui::SameLine();\n\t\trender_made_piggy |= ImGui::RadioButton(\"Use uniform lighting\", ((int*)&m_renderer->get_world_settings().ambient_light_type), 1); ImGui::SameLine();\n\t\tImGui::BeginDisabled(!has_envmap);\n\t\trender_made_piggy |= ImGui::RadioButton(\"Use envmap lighting\", ((int*)&m_renderer->get_world_settings().ambient_light_type), 2);\n\t\tif (!has_envmap)\n\t\t\t// Showing a tooltip for why the envmap button is disabled\n\t\t\tImGuiRenderer::show_help_marker(\"No envmap loaded.\");\n\t\tImGui::EndDisabled();\n\n\t\tif (m_renderer->get_world_settings().ambient_light_type == AmbientLightType::UNIFORM)\n\t\t{\n\t\t\trender_made_piggy |= ImGui::ColorEdit3(\"Uniform light color\", (float*)&m_renderer->get_world_settings().uniform_light_color, ImGuiColorEditFlags_HDR | ImGuiColorEditFlags_Float);\n\t\t}\n\t\telse if (m_renderer->get_world_settings().ambient_light_type == AmbientLightType::ENVMAP)\n\t\t{\n\t\t\tfloat& rota_X = m_renderer->get_envmap().rotation_X;\n\t\t\tfloat& rota_Y = m_renderer->get_envmap().rotation_Y;\n\t\t\tfloat& rota_Z = m_renderer->get_envmap().rotation_Z;\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tbool rotation_changed = false;\n\t\t\trotation_changed |= ImGui::SliderFloat(\"Envmap rotation X\", &rota_X, 0.0f, 1.0f);\n\t\t\trotation_changed |= ImGui::SliderFloat(\"Envmap rotation Y\", &rota_Y, 0.0f, 1.0f);\n\t\t\trotation_changed |= ImGui::SliderFloat(\"Envmap rotation Z\", &rota_Z, 0.0f, 1.0f);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\trender_made_piggy |= rotation_changed;\n\t\t\trender_made_piggy |= ImGui::SliderFloat(\"Envmap intensity\", (float*)&m_renderer->get_world_settings().envmap_intensity, 0.0f, 10.0f);\n\t\t\tImGui::TreePush(\"Envmap intensity tree\");\n\t\t\trender_made_piggy |= ImGui::Checkbox(\"Scale background intensity\", (bool*)&m_renderer->get_world_settings().envmap_scale_background_intensity);\n\t\t\tif (m_renderer->get_world_settings().envmap_intensity != 1.0f && 
!m_renderer->get_world_settings().envmap_scale_background_intensity)\n\t\t\t{\n\t\t\t\tImGuiRenderer::add_warning(\"Using a custom envmap intensity without scaling the background \"\n\t\t\t\t\t\"intensity can result in discrepancies when looking at the envmap through a mirror or \"\n\t\t\t\t\t\"transparent glass for example (glass with IOR 1.0f). In these rare cases, the envmap \"\n\t\t\t\t\t\"will appear brighter through the objects than when viewed directly.\\n\"\n\t\t\t\t\t\"This is because scaling the envmap intensity without scaling how it appears to camera rays isn't \"\n\t\t\t\t\t\"physically accurate.\");\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\t// Ensuring no negative light color\n\t\tm_renderer->get_world_settings().uniform_light_color.clamp(0.0f, 1.0e38f);\n\n\t\tImGui::TreePop();\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n\n\tif (render_made_piggy)\n\t\tm_render_window->set_render_dirty(true);\n}\n\nvoid ImGuiSettingsWindow::draw_sampling_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tif (ImGui::CollapsingHeader(\"Sampling\"))\n\t{\n\t\tImGui::TreePush(\"Sampling tree\");\n\n\t\tif (ImGui::CollapsingHeader(\"Adaptive sampling\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Adaptive sampling tree\");\n\n\t\t\tif (!render_settings.accumulate)\n\t\t\t{\n\t\t\t\tif (ImGui::Button(\"Enable accumulation\"))\n\t\t\t\t{\n\t\t\t\t\trender_settings.accumulate = true;\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Cannot use adaptive sampling without accumulation\n\t\t\tImGui::BeginDisabled(!render_settings.accumulate);\n\n\t\t\tif (ImGui::Checkbox(\"Enable adaptive sampling\", (bool*)&render_settings.enable_adaptive_sampling))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif 
(!render_settings.accumulate)\n\t\t\t\tImGuiRenderer::add_tooltip(\"Cannot use adaptive sampling when accumulation is not on.\");\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_RESTIR_DI ||\n\t\t\t\tglobal_kernel_options->get_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY) == PSS_RESTIR_GI)\n\t\t\t{\n\t\t\t\tImGuiRenderer::add_warning(\"Adaptive sampling may not be efficient when used with ReSTIR. This is because \"\n\t\t\t\t\t\"ReSTIR looks around the current pixel to find neighbor to reuse but with adaptive sampling enabled, most pixels \"\n\t\t\t\t\t\"are not going to be sampled anymore. This means that these pixels contain stale samples that strongly biases ReSTIR is reused.\\n\"\n\t\t\t\t\t\"Thus, these sample are not reused to avoid bias but this then drastically reduces the efficiency of ReSTIR and the overall \"\n\t\t\t\t\t\"setup becomes inefficient.\");\n\t\t\t}\n\n\t\t\tfloat adaptive_sampling_noise_threshold_before = render_settings.adaptive_sampling_noise_threshold;\n\t\t\tImGui::BeginDisabled(!render_settings.enable_adaptive_sampling);\n\t\t\tif (ImGui::InputInt(\"Minimum samples\", &render_settings.adaptive_sampling_min_samples))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"How many samples to wait before adaptive sampling activates.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"The general rule is to keep this value as low as possible without getting conspicuous black/unconverged pixels.\");\n\t\t\tif (ImGui::InputFloat(\"Noise threshold\", &render_settings.adaptive_sampling_noise_threshold))\n\t\t\t{\n\t\t\t\trender_settings.adaptive_sampling_noise_threshold = std::max(0.0f, render_settings.adaptive_sampling_noise_threshold);\n\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\t// !Cannot use adaptive sampling without accumulation\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 
20.0f));\n\t\t\tImGui::TreePop();\n\n\t\t\t// !render_settings.accumulate\n\t\t\tImGui::EndDisabled();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Emissive geometry sampling\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Direct lighting sampling tree\");\n\n\t\t\tbool disabled = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) == LSS_RESTIR_DI;\n\t\t\tImGui::BeginDisabled(disabled);\n\t\t\tstatic int nee_sample_count = DirectLightSamplingNEESampleCount;\n\t\t\tImGui::SliderInt(\"NEE Sample count\", &nee_sample_count, 1, 8);\n\t\t\tImGuiRenderer::show_help_marker(std::string(\"How many light samples to take and shade per each vertex of the \"\n\t\t\t\t\"ray's path.\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"Said otherwise, we're going to run next-event estimation that many \"\n\t\t\t\t\"times per each intersection point along the ray.\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"This is good because this amortizes camera rays and bounce rays i.e. \"\n\t\t\t\t\"we get better shading quality for as many camera rays and bounce rays.\\n\"\n\t\t\t\t\"\\n\"\n\t\t\t\t\"With ReSTIR DI this only applies to the secondary bounces shading.\") +\n\t\t\t\t(disabled ? 
std::string(\"\\n\\nDisabled because not supported by ReSTIR DI\") : \"\"));\n\t\t\tif (nee_sample_count != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"NEE Sample count apply button\");\n\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_NEE_SAMPLE_COUNT, nee_sample_count);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t}\n\t\t\t\tImGui::TreePop();\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t}\n\t\t\tImGui::EndDisabled();\n\n\t\t\tconst char* items_base_strategy[] = { \"- Uniform sampling\", \"- Power sampling\", \"- ReGIR (Experimental)\"};\n\t\t\tconst char* tooltips_base_strategy[] = {\n\t\t\t\t\"All lights are sampled uniformly.\",\n\n\t\t\t\t\"Lights are sampled proportionally to their power.\",\n\n\t\t\t\t\"Uses ReGIR to sample lights.\\n\\n\"\n\t\t\t\t\"Implementation of[Rendering many lights with grid - based reservoirs, Boksansky, 2021]\"\n\t\t\t};\n\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"Base light sampling strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY), items_base_strategy, IM_ARRAYSIZE(items_base_strategy), tooltips_base_strategy))\n\t\t\t{\n\t\t\t\t// Will recompute the alias table if necessary\n\t\t\t\tm_renderer->recompute_emissives_power_alias_table();\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tconst char* items[] = { \"- No direct light sampling\", \"- Uniform one light\", \"- BSDF Sampling\", \"- MIS (1 Light + 1 BSDF)\", \"- RIS BDSF + Light candidates\", \"- ReSTIR DI (Primary Hit Only)\" };\n\t\t\tconst char* tooltips[] = {\n\t\t\t\t\"No direct light sampling. 
Emission is only gathered if rays happen to bounce into the lights.\",\n\t\t\t\t\"Samples one random light in the scene without MIS. Efficient as long as there are not too many lights in the scene and no glossy/specular surfaces.\",\n\t\t\t\t\"Samples lights only using one BSDF sample.\",\n\t\t\t\t\"Samples one random light in the scene with MIS(Multiple Importance Sampling) : light sample + BRDF sample.\",\n\t\t\t\t\"Samples lights in the scene with RIS (Resampled Importance Sampling) with both BSDF and light candidates. The number of light or BSDF candidates can be controlled.\",\n\t\t\t\t\"Uses ReSTIR DI to sample direct lighting at the first bounce in the scene. Later bounces use another of the above strategies which can be changed in the ReSTIR DI settings.\"\n\t\t\t};\n\n\t\t\tconst bool no_direct_light_sampling_disabled = false;\n\t\t\tconst bool uniform_one_light_disabled = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR;\n\t\t\tconst bool bsdf_sampling_disabled = false;\n\t\t\tconst bool mis_disabled = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR;\n\t\t\tconst bool ris_disabled = false;\n\t\t\tconst bool restir_di_disabled = false;\n\t\t\tbool disabled_items[] = { no_direct_light_sampling_disabled, uniform_one_light_disabled, bsdf_sampling_disabled, mis_disabled, ris_disabled, restir_di_disabled };\n\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"NEE strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY), items, IM_ARRAYSIZE(items), tooltips, disabled_items))\n\t\t\t{\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == 
LSS_BASE_REGIR)\n\t\t\t\tdraw_ReGIR_settings_panel();\n\n\t\t\t// Display additional widgets to control the parameters of the direct light\n\t\t\t// sampling strategy chosen (the number of candidates for RIS for example)\n\t\t\tswitch (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY))\n\t\t\t{\n\t\t\tcase LSS_NO_DIRECT_LIGHT_SAMPLING:\n\t\t\t\tbreak;\n\n\t\t\tcase LSS_ONE_LIGHT:\n\t\t\t\tbreak;\n\n\t\t\tcase LSS_MIS_LIGHT_BSDF:\n\t\t\t\tbreak;\n\n\t\t\tcase LSS_RIS_BSDF_AND_LIGHT:\n\t\t\t{\n\t\t\t\tif (ImGui::CollapsingHeader(\"RIS Settings\"))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"RIS Settings tree\");\n\n\t\t\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BASE_STRATEGY) == LSS_BASE_REGIR)\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::Text(\"The mix of BSDF/light samples is controlled\\n\"\n\t\t\t\t\t\t\t\"by the ReGIR settings.\");\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t{\n\t\t\t\t\t\tbool use_visibility_ris_target_function = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION);\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Use visibility in RIS target function\", &use_visibility_ris_target_function))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION, use_visibility_ris_target_function ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (ImGui::SliderInt(\"RIS # of BSDF candidates\", &render_settings.ris_settings.number_of_bsdf_candidates, 0, 16))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\trender_settings.ris_settings.number_of_bsdf_candidates = std::max(0, render_settings.ris_settings.number_of_bsdf_candidates);\n\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (ImGui::SliderInt(\"RIS # of light candidates\", &render_settings.ris_settings.number_of_light_candidates, 0, 32))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\trender_settings.ris_settings.number_of_light_candidates = std::max(0, render_settings.ris_settings.number_of_light_candidates);\n\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t}\n\t\t\t\t\t\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tcase LSS_RESTIR_DI:\n\t\t\t{\n\t\t\t\tif (ImGui::CollapsingHeader(\"ReSTIR DI Settings\"))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"ReSTIR DI Settings tree\");\n\n\t\t\t\t\tImGui::Text(\"VRAM Usage: %.3fMB\", m_renderer->get_ReSTIR_DI_render_pass()->get_VRAM_usage());\n\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\tdisplay_ReSTIR_DI_bias_status(global_kernel_options);\n\n\t\t\t\t\tif (ImGui::Checkbox(\"Use Final Visibility\", &render_settings.restir_di_settings.do_final_shading_visibility))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\tif (ImGui::SliderInt(\"M-cap\", &render_settings.restir_di_settings.m_cap, 0, 64, \"%d\", ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t\t{\n\t\t\t\t\t\trender_settings.restir_di_settings.m_cap = std::max(0, render_settings.restir_di_settings.m_cap);\n\t\t\t\t\t\tif 
(render_settings.accumulate)\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t}\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"0 disables the M-cap\");\n\n\t\t\t\t\tif (ImGui::CollapsingHeader(\"Rejection Heuristics\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"ReSTIR DI - Rejection Heuristics Tree\");\n\n\t\t\t\t\t\tdraw_ReSTIR_neighbor_heuristics_panel<false>();\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t}\n\n\n\n\n\n\t\t\t\t\tif (ImGui::CollapsingHeader(\"Initial Candidates Pass\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"ReSTIR DI - Initial Candidate Pass Tree\");\n\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tbool do_light_presampling = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING);\n\t\t\t\t\t\t\tif (ImGui::Checkbox(\"Do light presampling\", &do_light_presampling))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_LIGHT_PRESAMPLING, do_light_presampling ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, lights are presampled in a pre-process pass as proposed in\"\n\t\t\t\t\t\t\t\t\" [Rearchitecting Spatiotemporal Resampling for Production, Wyman, Panteleev, 2021]\\n\\n\"\n\t\t\t\t\t\t\t\t\"This improves performance in scenes with dozens of thousands / millions of\"\n\t\t\t\t\t\t\t\t\" lights by avoiding cache trashing because of the memory random walk that\"\n\t\t\t\t\t\t\t\t\" light sampling becomes with that many lights\");\n\t\t\t\t\t\t\tif (do_light_presampling)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tImGui::TreePush(\"Light presampling strategy tree\");\n\n\t\t\t\t\t\t\t\tconst char* items_base_strategy[] = { \"- Uniform sampling\", \"- Power sampling\" };\n\t\t\t\t\t\t\t\tconst char* tooltips_base_strategy[] = {\n\t\t\t\t\t\t\t\t\t\"All lights are sampled uniformly.\",\n\n\t\t\t\t\t\t\t\t\t\"Lights are sampled proportionally to their power.\"\n\t\t\t\t\t\t\t\t};\n\t\t\t\t\t\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"Presampling light strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::RESTIR_DI_LIGHT_PRESAMPLING_STRATEGY), items_base_strategy, IM_ARRAYSIZE(items_base_strategy), tooltips_base_strategy))\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t// Will recompute the alias table if necessary\n\t\t\t\t\t\t\t\t\tm_renderer->recompute_emissives_power_alias_table();\n\n\t\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tbool use_initial_target_function_visibility = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY);\n\t\t\t\t\t\t\tif 
(ImGui::Checkbox(\"Use visibility in target function\", &use_initial_target_function_visibility))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY, use_initial_target_function_visibility ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use the visibility term in the target function used for \"\n\t\t\t\t\t\t\t\t\"resampling initial candidates\");\n\n\t\t\t\t\t\t\tif (ImGui::SliderInt(\"# of BSDF initial candidates\", &render_settings.restir_di_settings.initial_candidates.number_of_initial_bsdf_candidates, 0, 16))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\t\trender_settings.restir_di_settings.initial_candidates.number_of_initial_bsdf_candidates = std::max(0, render_settings.restir_di_settings.initial_candidates.number_of_initial_bsdf_candidates);\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tif (ImGui::SliderInt(\"# of initial light candidates\", &render_settings.restir_di_settings.initial_candidates.number_of_initial_light_candidates, 0, 32))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\t\trender_settings.restir_di_settings.initial_candidates.number_of_initial_light_candidates = std::max(0, render_settings.restir_di_settings.initial_candidates.number_of_initial_light_candidates);\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tImGui::BeginDisabled(!m_renderer->has_envmap());\n\t\t\t\t\t\t\tif (ImGui::SliderFloat(\"Envmap candidate probability\", &render_settings.restir_di_settings.initial_candidates.envmap_candidate_probability, 0.0f, 
1.0f))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\trender_settings.restir_di_settings.initial_candidates.envmap_candidate_probability = hippt::clamp(0.0f, 1.0f, render_settings.restir_di_settings.initial_candidates.envmap_candidate_probability);\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"The probability to sample the envmap per each \\\"initial light candidates\\\"\");\n\t\t\t\t\t\t\tImGui::EndDisabled();\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t}\n\n\n\n\n\n\t\t\t\t\tif (ImGui::CollapsingHeader(\"Visibility Reuse Pass\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"ReSTIR DI - Visibility Reuse Pass Tree\");\n\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tbool do_visibility_reuse = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE);\n\t\t\t\t\t\t\tif (ImGui::Checkbox(\"Do visibility reuse\", &do_visibility_reuse))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE, do_visibility_reuse ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t}\n\n\n\n\n\n\t\t\t\t\tdraw_ReSTIR_temporal_reuse_panel<false>([this, &render_settings]() \n\t\t\t\t\t{\n\t\t\t\t\t\tif (render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass && render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (ImGui::Checkbox(\"Do Fused Spatiotemporal\", &render_settings.restir_di_settings.do_fused_spatiotemporal))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tm_renderer->get_ReSTIR_DI_render_pass()->request_temporal_bufffers_clear();\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, the spatial and temporal pass will be fused into a single kernel call. \"\n\t\t\t\t\t\t\t\t\"This avoids a synchronization barrier between the temporal pass and the spatial pass \"\n\t\t\t\t\t\t\t\t\"and increases performance. Because the spatial must then resample without the output of the temporal pass, the spatial \"\n\t\t\t\t\t\t\t\t\"pass only resamples on the temporal reservoir buffer, not the temporal + initial candidates reservoir \"\n\t\t\t\t\t\t\t\t\"(which is the output of the temporal pass). 
This is usually imperceptible.\");\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Do Temporal Reuse\", &render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t\t\tif (!render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\t\t\t\t\t\t// Disabling fused spatiotemporal if we just disabled the temporal reuse\n\t\t\t\t\t\t\t\trender_settings.restir_di_settings.do_fused_spatiotemporal = false;\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\n\t\t\t\t\tImGui::PushItemWidth(12 * ImGui::GetFontSize());\n\t\t\t\t\tdraw_ReSTIR_spatial_reuse_panel<false>([&render_settings, this] () {\n\t\t\t\t\t\tif (render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass && render_settings.restir_di_settings.common_temporal_pass.do_temporal_reuse_pass)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tif (ImGui::Checkbox(\"Do fused spatiotemporal\", &render_settings.restir_di_settings.do_fused_spatiotemporal))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tm_renderer->get_ReSTIR_DI_render_pass()->request_temporal_bufffers_clear();\n\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, the spatial and temporal pass will be fused into a single kernel call. \"\n\t\t\t\t\t\t\t\t\"This avois a synchronization barrier between the temporal pass and the spatial pass \"\n\t\t\t\t\t\t\t\t\"and increases performance. Because the spatial must then resample without the output of the temporal pass, the spatial \"\n\t\t\t\t\t\t\t\t\"pass only resamples on the temporal reservoir buffer, not the temporal + initial candidates reservoir \"\n\t\t\t\t\t\t\t\t\"(which is the output of the temporal pass). 
This is usually imperceptible.\");\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Do spatial reuse\", &render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t\t\tif (!render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\t\t\t\t\t\t\t// Disabling fused spatiotemporal if we just disabled the spatial reuse\n\t\t\t\t\t\t\t\trender_settings.restir_di_settings.do_fused_spatiotemporal = false;\n\t\t\t\t\t\t}\n\t\t\t\t\t});\n\t\t\t\t\tImGui::PopItemWidth();\n\n\n\n\n\t\t\t\t\tdraw_ReSTIR_bias_correction_panel<false>();\n\t\t\t\t\tif (ImGui::CollapsingHeader(\"Debug\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"ReSTIR DI debug options tree\");\n\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Debug neighbor reuse positions\", &render_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location))\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, neighbor in the spatial reuse pass will be hardcoded to always be \"\n\t\t\t\t\t\t\t\"15 pixels to the right, not in a circle. 
This makes spotting bias easier when debugging.\");\n\t\t\t\t\t\tif (render_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tImGui::TreePush(\"Debug neighbor location vertical tree\");\n\n\t\t\t\t\t\t\tImGui::Text(\"Debug reuse direction\");\n\t\t\t\t\t\t\tbool reuse_direction_changed = false;\n\t\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Horizontally\", ((int*)&render_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location_direction), 0); ImGui::SameLine();\n\t\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Vertically\", ((int*)&render_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location_direction), 1); ImGui::SameLine();\n\t\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Diagonally\", ((int*)&render_settings.restir_di_settings.common_spatial_pass.debug_neighbor_location_direction), 2);\n\n\t\t\t\t\t\t\tif (reuse_direction_changed)\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t}\n\n\n\n\n\t\t\t\t\tif (ImGui::CollapsingHeader(\"Later Bounces Sampling Strategy\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"Later Bounces tree\");\n\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tconst char* second_bounce_items[] = { \"- Uniform one light\", \"- BSDF Sampling\", \"- MIS (1 Light + 1 BSDF)\", \"- RIS BDSF + Light candidates\" };\n\t\t\t\t\t\t\tif (ImGui::Combo(\"Direct Lighting Strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY), second_bounce_items, IM_ARRAYSIZE(second_bounce_items)))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"What direct lighting strategy to use for bounces that 
come after the first one (camera ray hit) since ReSTIR DI only applies on the first bounce.\");\n\t\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\t\t\t\tswitch (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_LATER_BOUNCES_SAMPLING_STRATEGY))\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\tcase RESTIR_DI_LATER_BOUNCES_UNIFORM_ONE_LIGHT:\n\t\t\t\t\t\t\t\tbreak;\n\n\t\t\t\t\t\t\tcase RESTIR_DI_LATER_BOUNCES_MIS_LIGHT_BSDF:\n\t\t\t\t\t\t\t\tbreak;\n\n\t\t\t\t\t\t\tcase RESTIR_DI_LATER_BOUNCES_RIS_BSDF_AND_LIGHT:\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tbool use_visibility_ris_target_function = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION);\n\t\t\t\t\t\t\t\tif (ImGui::Checkbox(\"Use visibility in RIS target function\", &use_visibility_ris_target_function))\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::RIS_USE_VISIBILITY_TARGET_FUNCTION, use_visibility_ris_target_function ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif (ImGui::SliderInt(\"RIS # of BSDF candidates\", &render_settings.ris_settings.number_of_bsdf_candidates, 0, 16))\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\t\t\trender_settings.ris_settings.number_of_bsdf_candidates = std::max(0, render_settings.ris_settings.number_of_bsdf_candidates);\n\n\t\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tif (ImGui::SliderInt(\"RIS # of light candidates\", &render_settings.ris_settings.number_of_light_candidates, 0, 32))\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t// Clamping to 0\n\t\t\t\t\t\t\t\t\trender_settings.ris_settings.number_of_light_candidates = std::max(0, render_settings.ris_settings.number_of_light_candidates);\n\n\t\t\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\tbreak;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop(); // ReSTIR DI Settings tree\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbreak;\n\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) != LSS_BSDF && global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_STRATEGY) != LSS_NO_DIRECT_LIGHT_SAMPLING)\n\t\t\t\tdraw_next_event_estimation_plus_plus_panel();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Envmap sampling\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Envmap sampling tree\");\n\n\t\t\t// Disabled if no envmap loaded\n\t\t\tbool envmap_sampling_disabled = m_renderer->get_envmap().get_width() == 
0;\n\n\t\t\tImGui::BeginDisabled(envmap_sampling_disabled);\n\t\t\tconst char* items[] = { \"- No envmap sampling\", \"- Importance Sampling - Binary Search\", \"- Importance Sampling - Alias Table \" };\n\t\t\tconst char* tooltips[] = {\n\t\t\t\t\"The envmap will not be importance sampled. Should behave okay for low frequency envmaps but this is going to be extremely inefficient for high frequency envmaps.\",\n\t\t\t\t\"Importance samples a texel of the environment map proportionally to its luminance using a binary search on the CDF distributions of the envmap luminance. Good convergence.\",\n\t\t\t\t\"Importance samples a texel of the environment map proportionally to its luminance using an alias table for constant time sampling. Good convergence and faster than \\\"Binary Search\\\".\"\n\t\t\t};\n\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"Sampling strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY), items, IM_ARRAYSIZE(items), tooltips))\n\t\t\t{\n\t\t\t\tThreadManager::start_thread(\"RecomputeEnvmapSamplingStructure\", [this]() {\n\t\t\t\t\tm_renderer->get_envmap().recompute_sampling_data_structure(m_renderer.get());\n\t\t\t\t\t});\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\tThreadManager::join_threads(\"RecomputeEnvmapSamplingStructure\");\n\t\t\t}\n\n\t\t\tif (envmap_sampling_disabled)\n\t\t\t\tImGuiRenderer::add_tooltip(\"Disabled because no envmap is loaded in the renderer.\");\n\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_STRATEGY) != ESS_NO_SAMPLING)\n\t\t\t{\n\t\t\t\tImGui::Text(\"Sampling structure VRAM usage: %.3fMB\", m_renderer->get_envmap().get_sampling_structure_VRAM_usage());\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\t// If we do have an importance sampling strategy\n\t\t\t\tbool do_envmap_bsdf_mis = 
global_kernel_options->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BSDF_MIS);\n\t\t\t\tif (ImGui::Checkbox(\"Do MIS with BSDF\", &do_envmap_bsdf_mis))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BSDF_MIS, do_envmap_bsdf_mis ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to shoot a BSDF ray when sampling the envmap.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"Useful on specular/glossy surfaces.\");\n\n\t\t\t\tbool do_envmap_bilinear_filtering = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BILINEAR_FILTERING);\n\t\t\t\tif (ImGui::Checkbox(\"Do bilinear filtering\", &do_envmap_bilinear_filtering))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::ENVMAP_SAMPLING_DO_BILINEAR_FILTERING, do_envmap_bilinear_filtering ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to do bilinear filtering when sampling the envmap.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"This is mostly useful when the camera is looking straigth at the envmap and we don't \"\n\t\t\t\t\t\"have camera ray jittering on: in this case, bilinear filtering will hide the \"\n\t\t\t\t\t\"pixelated look of the envmap.\");\n\t\t\t}\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Path sampling\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Path sampling tree\");\n\n\t\t\tconst char* items[] = { \"- BSDF sampling\", \"- ReSTIR GI\" };\n\t\t\tconst char* tooltips[] = {\n\t\t\t\t\"Classical BSDF path tracing: sample the BSDF at each bounce for the next direction.\",\n\t\t\t\t\"Uses ReSTIR GI to resample a path to shade for the pixel.\",\n\t\t\t};\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"Sampling strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY), items, IM_ARRAYSIZE(items), tooltips))\n\t\t\t{\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tswitch (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY))\n\t\t\t{\n\t\t\tcase PSS_RESTIR_GI:\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"ReSTIR GI options tree\");\n\n\t\t\t\tImGui::Text(\"VRAM Usage: %.3fMB\", m_renderer->get_ReSTIR_GI_render_pass()->get_VRAM_usage());\n\t\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION) == KERNEL_OPTION_FALSE)\n\t\t\t\t\tImGuiRenderer::add_warning(\"Due to numerical float imprecisions, errors on specular surfaces (especially glass) \"\n\t\t\t\t\t\t\"are expected with ReSTIR 
GI if not using \\\"BSDF delta distribution optimization\\\".\"\n\t\t\t\t\t\t\"\\nThis will manifest as some darkening (somewhat similar to rendering with less bounces) on perfectly specular surfaces (delta distributions).\\n\\n\"\n\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\"Enable \\\"BSDF delta distribution optimization\\\" in \\\"Performance Settings\\\" --> \\\"General Settings\\\" to get rid of this issue.\");\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tif (ImGui::SliderInt(\"M-cap\", &render_settings.restir_gi_settings.m_cap, 0, 255, \"%d\", ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t{\n\t\t\t\t\trender_settings.restir_gi_settings.m_cap = std::max(0, render_settings.restir_gi_settings.m_cap);\n\t\t\t\t\tif (render_settings.accumulate)\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\tif (ImGui::CollapsingHeader(\"Rejection Heuristics\"))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"ReSTIR GI - Rejection Heuristics Tree\");\n\n\t\t\t\t\tdraw_ReSTIR_neighbor_heuristics_panel<true>();\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t}\n\n\t\t\t\tImGui::PushItemWidth(12 * ImGui::GetFontSize());\n\t\t\t\tdraw_ReSTIR_temporal_reuse_panel<true>([&render_settings, this]() {\n\t\t\t\t\tif (ImGui::Checkbox(\"Do Temporal Reuse\", &render_settings.restir_gi_settings.common_temporal_pass.do_temporal_reuse_pass))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t});\n\t\t\t\tdraw_ReSTIR_spatial_reuse_panel<true>([&render_settings, this]() {\n\t\t\t\t\tif (ImGui::Checkbox(\"Do spatial reuse\", &render_settings.restir_gi_settings.common_spatial_pass.do_spatial_reuse_pass))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t});\n\t\t\t\tImGui::PopItemWidth();\n\n\t\t\t\tdraw_ReSTIR_bias_correction_panel<true>();\n\n\t\t\t\tif (ImGui::CollapsingHeader(\"Debug\"))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"ReSTIR GI options tree\");\n\n\t\t\t\t\tif 
(ImGui::Checkbox(\"Debug neighbor reuse positions\", &render_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, neighbor in the spatial reuse pass will be hardcoded to always be \"\n\t\t\t\t\t\t\"15 pixels to the right, not in a circle. This makes spotting bias easier when debugging.\");\n\t\t\t\t\tif (render_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location)\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"Debug neighbor location vertical tree\");\n\n\t\t\t\t\t\tImGui::Text(\"Debug reuse direction\");\n\t\t\t\t\t\tbool reuse_direction_changed = false;\n\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Horizontally\", ((int*)&render_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location_direction), 0); ImGui::SameLine();\n\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Vertically\", ((int*)&render_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location_direction), 1); ImGui::SameLine();\n\t\t\t\t\t\treuse_direction_changed |= ImGui::RadioButton(\"Diagonally\", ((int*)&render_settings.restir_gi_settings.common_spatial_pass.debug_neighbor_location_direction), 2);\n\n\t\t\t\t\t\tif (reuse_direction_changed)\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\tconst char* debug_view_items[] = { \"No debug view\", \"- Final reservoir UCW\", \"- Final reservoir target function\", \"- Final reservoir weight sum\" , \"- Final reservoir M\", \"- Per pixel reuse radius\", \"- Valid directions percentage\"};\n\t\t\t\t\tif (ImGui::Combo(\"Debug view\", (int*)&render_settings.restir_gi_settings.debug_view, debug_view_items, IM_ARRAYSIZE(debug_view_items)))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tif (ImGui::SliderFloat(\"Debug view scale 
factor\", &render_settings.restir_gi_settings.debug_view_scale_factor, 0.0f, 1.0f))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\t\t\tdefault:\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Material sampling\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Sampling Materials Tree\");\n\t\t\tdraw_principled_bsdf_energy_conservation();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tif (ImGui::CollapsingHeader(\"Principled BSDF diffuse lobe\"))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"Principled bsdf diffuse lobe tree\");\n\n\t\t\t\tconst char* items[] = { \"- Lambertian\", \"- Oren-Nayar\" };\n\t\t\t\tif (ImGui::Combo(\"Diffuse Lobe\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DIFFUSE_LOBE), items, IM_ARRAYSIZE(items)))\n\t\t\t\t{\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tif (ImGui::CollapsingHeader(\"Principled BSDF glossy lobe\"))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"Principled bsdf glossy lobe tree\");\n\n\t\t\t\tbool sample_glossy_based_on_fresnel = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL);\n\t\t\t\tif (ImGui::Checkbox(\"Fresnel-based sampling##glossy\", &sample_glossy_based_on_fresnel))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_GLOSSY_BASED_ON_FRESNEL, sample_glossy_based_on_fresnel ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to sample the glossy/diffuse base layer of the BSDF based on the fresnel or not.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"This means that the diffuse layer will be sampled more often at normal incidence since this is where \"\n\t\t\t\t\t\"the specular layer reflects close to no light.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"At grazing angle however, where the specular layer reflects the most light(and so the diffuse layer \"\n\t\t\t\t\t\"below isn't reached by that light that is reflected by the specular layer), it is the specular layer \"\n\t\t\t\t\t\"that will be sampled more often.\");\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tif (ImGui::CollapsingHeader(\"Principled BSDF coat lobe\"))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"Principled bsdf coat lobe tree\");\n\n\t\t\t\tbool sample_coat_based_on_fresnel = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL);\n\t\t\t\tif (ImGui::Checkbox(\"Fresnel-based sampling##coat\", &sample_coat_based_on_fresnel))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_SAMPLE_COAT_BASED_ON_FRESNEL, sample_coat_based_on_fresnel ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Same as the glossy layer fresnel-based sampling but for the coat layer.\");\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"GGX\");\n\n\t\t\tstd::vector<const char*> ggx_sampling_items = { \"- VNDF\", \"- VNDF Spherical Caps\" };\n\t\t\tif (ImGui::Combo(\"GGX Sampling Method\", m_renderer->get_global_compiler_options()->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::GGX_SAMPLE_FUNCTION), ggx_sampling_items.data(), ggx_sampling_items.size()))\n\t\t\t{\n\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"How to sample the GGX NDF\");\n\n\t\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\t\tif (ImGui::Combo(\"GGX Masking-Shadowing\", (int*)&render_data.bsdfs_data.GGX_masking_shadowing, masking_shadowing_items.data(), masking_shadowing_items.size()))\n\t\t\t{\n\t\t\t\t// Reloading all the energy compensation textures because if we change the masking-shadowing term,\n\t\t\t\t// the precomputed directional albedo isn't correct anymore\n\t\t\t\tm_renderer->load_GGX_energy_compensation_textures();\n\t\t\t\tm_renderer->load_GGX_glass_energy_compensation_textures();\n\t\t\t\tm_renderer->load_glossy_dielectric_energy_compensation_textures();\n\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Which masking-shadowing term to use with the GGX NDF.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"Microfacet model regularization\");\n\t\t\tdraw_microfacet_model_regularization_tree();\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 
20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_ReGIR_settings_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tif (ImGui::CollapsingHeader(\"ReGIR Settings\"))\n\t{\n\t\tImGui::TreePush(\"ReGIR settings tree\");\n\n\t\tstd::shared_ptr<ReGIRRenderPass> regir_render_pass = m_renderer->get_ReGIR_render_pass();\n\n\t\tImGui::SeparatorText(\"Grid stats (primary cells | secondary cells)\");\n\t\tImGui::Text(\"# of hash cells occupied: %u | %u\", regir_render_pass->get_number_of_cells_alive(true), regir_render_pass->get_number_of_cells_alive(false));\n\t\tImGui::Text(\"Hash cells capacity: %u | %u\", regir_render_pass->get_total_number_of_cells_alive(true), regir_render_pass->get_total_number_of_cells_alive(false));\n\t\tImGui::Text(\"Load factor: %.3f%% | %.3f%%\", regir_render_pass->get_alive_cells_ratio(true) * 100.0f, regir_render_pass->get_alive_cells_ratio(false) * 100.0f);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::Text(\"VRAM Usage: %.3fMB (avg. 
%.1fB per cell)\", regir_render_pass->get_VRAM_usage(), regir_render_pass->get_VRAM_usage() * 1000000.0f / ((float)regir_render_pass->get_total_number_of_cells_alive(true) + regir_render_pass->get_total_number_of_cells_alive(false)));\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tconst char* items_base_strategy[] = { \"- Uniform sampling\", \"- Power sampling\" };\n\t\tconst char* tooltips_base_strategy[] = {\n\t\t\t\"All lights are sampled uniformly\",\n\n\t\t\t\"Lights are sampled proportionally to their power\",\n\t\t};\n\t\tif (ImGuiRenderer::ComboWithTooltips(\"Base ReGIR light sampling strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_LIGHT_SAMPLING_BASE_STRATEGY), items_base_strategy, IM_ARRAYSIZE(items_base_strategy), tooltips_base_strategy))\n\t\t{\n\t\t\t// Will recompute the alias table if necessary\n\t\t\tm_renderer->recompute_emissives_power_alias_table();\n\n\t\t\tm_renderer->recompile_kernels();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tReGIRSettings& regir_settings = m_renderer->get_render_settings().regir_settings;\n\n\t\tif (ImGui::CollapsingHeader(\"Grid fill pass\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR grid build tree\");\n\n\t\t\tif (ImGui::Checkbox(\"Do light presampling\", &render_data.render_settings.regir_settings.do_light_presampling))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_DO_LIGHT_PRESAMPLING, render_data.render_settings.regir_settings.do_light_presampling ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to enable light presampling to improve grid fill performance\"\n\t\t\t\t\"on scenes with many many lights.\");\n\t\t\tImGui::BeginDisabled(!render_data.render_settings.regir_settings.do_light_presampling);\n\t\t\tif (ImGui::SliderInt(\"Stratification size\", &regir_settings.presampled_lights.stratification_size, 8, 64))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Subset size\", &regir_settings.presampled_lights.subset_size, 128, 2048))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Subset count\", &regir_settings.presampled_lights.subset_count, 32, 256))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::EndDisabled();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::SeparatorText(\"Primary hits grid cells\");\n\t\t\tif (ImGui::SliderInt(\"Light samples per reservoir\", &regir_settings.grid_fill_settings_primary_hits.light_sample_count_per_cell_reservoir, 0, 64))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Non-canonical reservoirs per grid cell\", regir_settings.grid_fill_settings_primary_hits.get_non_canonical_reservoir_count_per_cell_ptr(), 1, 64))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Canonical reservoirs per grid cell\", regir_settings.grid_fill_settings_primary_hits.get_canonical_reservoir_count_per_cell_ptr(), 1, 16))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::SeparatorText(\"Secondary hits grid cells\");\n\t\t\tif (ImGui::SliderInt(\"Light samples per reservoir##secondary\", &regir_settings.grid_fill_settings_secondary_hits.light_sample_count_per_cell_reservoir, 0, 64))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif 
(ImGui::SliderInt(\"Non-canonical reservoirs per grid cell##secondary\", regir_settings.grid_fill_settings_secondary_hits.get_non_canonical_reservoir_count_per_cell_ptr(), 1, 64))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Canonical reservoirs per grid cell##secondary\", regir_settings.grid_fill_settings_secondary_hits.get_canonical_reservoir_count_per_cell_ptr(), 1, 16))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tImGui::SeparatorText(\"Common to primary and secondary grid cells\");\n\t\t\tstatic bool visibility_grid_fill_target_function = ReGIR_GridFillTargetFunctionVisibility;\n\t\t\tif (ImGui::Checkbox(\"Use visibility in target function\", &visibility_grid_fill_target_function))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_VISIBILITY, visibility_grid_fill_target_function ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use a visibility term in the target function used to resample the reservoirs of the grid cells.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Probably too expensive to be efficient.\");\n\n\t\t\tstatic bool nee_plus_plus_visibility_grid_fill_target_function = ReGIR_GridFillTargetFunctionNeePlusPlusVisibilityEstimation;\n\t\t\tif (ImGui::Checkbox(\"Use NEE++ visibility in target function\", &nee_plus_plus_visibility_grid_fill_target_function))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION, nee_plus_plus_visibility_grid_fill_target_function ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to estimate the visibility probability of samples with NEE++ during the grid fill.\");\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS) == KERNEL_OPTION_FALSE && global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY_ESTIMATION) == KERNEL_OPTION_TRUE)\n\t\t\t{\n\t\t\t\tImGuiRenderer::add_warning(\"NEE++ needs to be enabled to use it in ReGIR\");\n\n\t\t\t\tImGui::TreePush(\"Use NEE++ ReGIR Tree\");\n\t\t\t\tuse_next_event_estimation_checkbox(\"Enable NEE++\");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Shortcut for enabling for enabling NEE++\");\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tstatic bool cosine_term_grid_fill_target_function = ReGIR_GridFillTargetFunctionCosineTerm;\n\t\t\tif (ImGui::Checkbox(\"Use cosine term in target function\", &cosine_term_grid_fill_target_function))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM, cosine_term_grid_fill_target_function ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use a the cosine term between the direction to the light sample and the \"\n\t\t\t\t\"representative normal of the grid cell in the target function used to resample the reservoirs of the grid cells.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This has no effect is representative points are not being used.\");\n\n\t\t\tstatic bool cosine_term_light_source_grid_fill_target_function = ReGIR_GridFillTargetFunctionCosineTermLightSource;\n\t\t\tif (ImGui::Checkbox(\"Use cosine term light source in target function\", &cosine_term_light_source_grid_fill_target_function))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_TARGET_FUNCTION_COSINE_TERM_LIGHT_SOURCE, cosine_term_light_source_grid_fill_target_function ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Takes the cosine term at the light source (i.e. the cosine term of the geometry term) \"\n\t\t\t\t\"into account when evaluating the target function during grid fill\");\n\n\t\t\tstatic bool bsdf_grid_fill_target_function_first_hits = ReGIR_GridFillPrimaryHitsTargetFunctionBSDF;\n\t\t\tif (ImGui::Checkbox(\"Use BSDF in target function (1st hits)\", &bsdf_grid_fill_target_function_first_hits))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_PRIMARY_HITS_TARGET_FUNCTION_BSDF, bsdf_grid_fill_target_function_first_hits ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to include the BSDF in the target function used for the resampling of the initial candidates\\n\"\n\t\t\t\t\"for the grid fill.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Helps a lot on glossy surfaces.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This option applies to primary hits only and should generally be set to true for better sampling.\");\n\n\t\t\tstatic bool bsdf_grid_fill_target_function_secondary_hits = ReGIR_GridFillSecondaryHitsTargetFunctionBSDF;\n\t\t\tif (ImGui::Checkbox(\"Use BSDF in target function (2nd hits)\", &bsdf_grid_fill_target_function_secondary_hits))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_GRID_FILL_SECONDARY_HITS_TARGET_FUNCTION_BSDF, bsdf_grid_fill_target_function_secondary_hits ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Same as the option for the primary hits but only applies to secondary hits.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This option should be set to false in general as we cannot guess in advance what the view direction is going\\n\"\n\t\t\t\t\"to be at secondary hits(since they can come from anywhere when the rays bounce around the scene) and thus we\\n\"\n\t\t\t\t\"cannot properly evaluate the BRDF for sampling lights.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tif (ImGui::SliderInt(\"Frame skip (1st hits)\", &regir_settings.frame_skip_primary_hit_grid, 0, 8))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"How many frames to skip before running the grid fill and spatial reuse passes again.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"A value of 1 for example means that the grid fill and spatial reuse will be ran at 
frame 0 \"\n\t\t\t\t\"but not at frame 1. And ran at frame 2 but not at frame 3. ...\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This amortizes the overhead of ReGIR grid fill / spatial reuse by using the fact that each cell \"\n\t\t\t\t\"contains many reservoirs so the same cell can be used multiple times before all reservoirs have been used \"\n\t\t\t\t\"and new samples are necessary.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"The frame skips can be different for filling the primary or secondary hits grid.\");\n\t\t\tif (ImGui::SliderInt(\"Frame skip (2nd hits)\", &regir_settings.frame_skip_secondary_hit_grid, 0, 8))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"How many frames to skip before running the grid fill and spatial reuse passes again.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"A value of 1 for example means that the grid fill and spatial reuse will be ran at frame 0 \"\n\t\t\t\t\"but not at frame 1. And ran at frame 2 but not at frame 3. ...\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This amortizes the overhead of ReGIR grid fill / spatial reuse by using the fact that each cell \"\n\t\t\t\t\"contains many reservoirs so the same cell can be used multiple times before all reservoirs have been used \"\n\t\t\t\t\"and new samples are necessary.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"The frame skips can be different for filling the primary or secondary hits grid.\");\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Spatial reuse\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR spatial reuse tree\");\n\n\t\t\tif (ImGui::Checkbox(\"Do spatial reuse\", &regir_settings.spatial_reuse.do_spatial_reuse))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::Checkbox(\"Do coalesced spatial reuse\", &regir_settings.spatial_reuse.do_coalesced_spatial_reuse))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"If true, the same random seed will be used by all 
grid cells during the spatial reuse for a given frame\\n.\"\n\t\t\t\t\"This has the effect of coalescing neighbors memory accesses which improves performance\");\n\t\t\tif (ImGui::SliderInt(\"Spatial reuse pass count\", &regir_settings.spatial_reuse.spatial_reuse_pass_count, 1, 4))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tif (ImGui::SliderInt(\"Neighbor reuse count\", &regir_settings.spatial_reuse.spatial_neighbor_count, 0, 32))\n\t\t\t\tm_render_window\t->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"How many cells around the center cell to reuse from.\");\n\n\t\t\tif (ImGui::SliderInt(\"Reuse per neighbor count\", &regir_settings.spatial_reuse.reuse_per_neighbor_count, 1, 16))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"How many reservoirs to reuse per neighbor cell.\");\n\n\t\t\tif (ImGui::SliderInt(\"Retries per neighbor\", &regir_settings.spatial_reuse.retries_per_neighbor, 1, 16))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\" When picking a random cell in the neighborhood for reuse, if that \"\n\t\t\t\t\"cell is out of the grid or if that cell is not alive etc..., we're \"\n\t\t\t\t\"going to retry another cell this many times.\\n\"\n\t\t\t\t\"This improves the chances that we're actually going to have a good \"\n\t\t\t\t\"neighbor to reuse from --> more reuse --> less variance.\");\n\n\t\t\tif (ImGui::SliderInt(\"Reuse radius\", &regir_settings.spatial_reuse.spatial_reuse_radius, 1, 3))\n\t\t\t\tm_render_window\t->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"Radius in cell in which to reuse around the center cell.\\n\"\n\t\t\t\t\"A radius of 1 means that we're going to reuse in the 3x3 cube around the center cell, givins us 26 neighbors\");\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t\tif 
(ImGui::CollapsingHeader(\"Shading\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR shading tree\");\n\n\t\t\tif (ImGui::SliderInt(\"Neighbors resampled\", &regir_settings.shading.number_of_neighbors, 1, 8))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Resample per neighbor\", &regir_settings.shading.reservoir_tap_count_per_neighbor, 1, 8))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\tstatic bool use_vis_shading_resampling = ReGIR_ShadingResamplingTargetFunctionVisibility;\n\t\t\tif (ImGui::Checkbox(\"Use visibility in target function\", &use_vis_shading_resampling))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_VISIBILITY, use_vis_shading_resampling ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use a shadow ray in the target function when \"\n\t\t\t\t\"shading a point at path tracing time. This reduces visibility noise.\");\n\n\t\t\tstatic bool use_nee_plus_plus_vis_shading_resampling = ReGIR_ShadingResamplingTargetFunctionNeePlusPlusVisibility;\n\t\t\tif (ImGui::Checkbox(\"Use NEE++ visibility estimation in target function\", &use_nee_plus_plus_vis_shading_resampling))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY, use_nee_plus_plus_vis_shading_resampling ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use NEE++ to estimate the visibility probability of the reservoir being resampled during \"\n\t\t\t\t\"shading such that reservoirs that are likely to be occluded will have a lower resampling probability\");\n\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS) == KERNEL_OPTION_FALSE && global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_TARGET_FUNCTION_NEE_PLUS_PLUS_VISIBILITY) == KERNEL_OPTION_TRUE)\n\t\t\t{\n\t\t\t\tImGuiRenderer::add_warning(\"NEE++ needs to be enabled to use it in ReGIR\");\n\n\t\t\t\tImGui::TreePush(\"Use NEE++ ReGIR Tree\");\n\t\t\t\tuse_next_event_estimation_checkbox(\"Enable NEE++\");\n\t\t\t\tImGuiRenderer::show_help_marker(\"Shortcut for enabling for enabling NEE++\");\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tstatic bool do_resampling_bsdf_mis = ReGIR_ShadingResamplingDoBSDFMIS;\n\t\t\tif (ImGui::Checkbox(\"Do BSDF MIS during resampling\", &do_resampling_bsdf_mis))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_BSDF_MIS, do_resampling_bsdf_mis ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to incorporate BSDF samples with MIS during shading resampling.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tstatic bool do_pairwise_MIS = ReGIR_ShadingResamplingDoMISPairwiseMIS;\n\t\t\tif (ImGui::Checkbox(\"Pairwise MIS\", &do_pairwise_MIS))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_DO_MIS_PAIRWISE_MIS, do_pairwise_MIS ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use Pairwise MIS weights for weighting the different samples at shading-resampling time.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"If this is false, 1 / Z MIS weights will be used instead which are potentially faster but definitely have more variance.\");\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"Jittering\");\n\t\t\tif (ImGui::Checkbox(\"Do cell jittering (1st hits)\", &regir_settings.shading.do_cell_jittering_first_hits))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::Checkbox(\"Do cell jittering (2nd hits)\", &regir_settings.shading.do_cell_jittering_secondary_hits))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tstatic bool jitter_canonical = ReGIR_ShadingResamplingJitterCanonicalCandidates;\n\t\t\tif (ImGui::Checkbox(\"Jitter canonical candidates\", &jitter_canonical))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESMAPLING_JITTER_CANONICAL_CANDIDATES, jitter_canonical ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to jitter canonical candidates during the shading resampling.\\n\"\n\t\t\t\t\"This reduces grid artifacts but increases variance.\");\n\t\t\tImGui::BeginDisabled(!regir_settings.shading.do_cell_jittering_first_hits && !regir_settings.shading.do_cell_jittering_secondary_hits);\n\t\t\tif (ImGui::SliderFloat(\"Jittering radius\", &regir_settings.shading.jittering_radius, 0.5f, 2.0f))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tstatic int jitter_tries = ReGIR_ShadingJitterTries;\n\t\t\tImGui::SliderInt(\"Jitter tries\", &jitter_tries, 1, 16);\n\t\t\tImGuiRenderer::show_help_marker(\"If using jittering, how many tries to perform to find a good neighbor at shading time ?\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This is because with jittering, our jittered position may end up outside of the grid \"\n\t\t\t\t\"or in an empty cell, in which case we want to retry with a differently jittered position \"\n\t\t\t\t\"to try and find a good neighbor\");\n\t\t\tif (jitter_tries != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_JITTER_TRIES))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"Apply jitter tries regir\");\n\n\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_JITTER_TRIES, jitter_tries);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\t\t\tImGui::EndDisabled();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"Correlation reduction\");\n\t\t\tif (ImGui::Checkbox(\"Correlation reduction\", &regir_settings.supersampling.do_correlation_reduction))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tif (ImGui::SliderInt(\"Correlation 
reduction factor\", &regir_settings.supersampling.correlation_reduction_factor, 1, 8))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"Multiple shading\");\n\t\t\tstatic bool shade_all_samples = ReGIR_ShadingResamplingShadeAllSamples;\n\t\t\tif (ImGui::Checkbox(\"Shade all samples\", &shade_all_samples))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_SHADING_RESAMPLING_SHADE_ALL_SAMPLES, shade_all_samples ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"If true, all samples resampled will be shaded instead of shading only the reservoir result of the resampling.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"This massively improves quality at the cost of performance and is very likely to be worth it for scenes that are not\\n\"\n\t\t\t\t\"too hard to trace (where shadow rays are expensive).\");\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Hash grid\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR hash grid tree\");\n\n\t\t\tstatic bool constant_grid_cell_size = ReGIR_HashGridConstantGridCellSize;\n\t\t\tif (ImGui::Checkbox(\"Constant grid cell size\", &constant_grid_cell_size))\n\t\t\t{\n\t\t\t\tregir_settings.hash_grid.m_grid_cell_min_size = constant_grid_cell_size ? 0.75f : 0.2f;\n\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_CONSTANT_GRID_CELL_SIZE, constant_grid_cell_size ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use constant grid cell size for the hash grid.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"If this is false, the grid cell size will increase(cells gets bigger) the further away \"\n\t\t\t\t\"from the camera.This can help with performance and the number of resident cells \"\n\t\t\t\t\"in the hash grid but it tends to hurt quality because of the reduced grid cell resolution.\");\n\n\t\t\tImGui::BeginDisabled(constant_grid_cell_size);\n\t\t\tif (ImGui::SliderFloat(\"Grid cell target projected size\", &regir_settings.hash_grid.m_grid_cell_target_projected_size, 5, 25))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"The target screen-space size (in pixels) that a grid cell should occupy on the screen.\\n\"\n\t\t\t\t\"This has the effect of making the grid cells larger in the distance so that the projected size stays approximately constant.\");\n\t\t\tImGui::EndDisabled();\n\n\t\t\tstd::string grid_cell_size_text = constant_grid_cell_size ? \"Grid cell size\" : \"Grid cell minimum size\";\n\t\t\tif (ImGui::SliderFloat(grid_cell_size_text.c_str(), &regir_settings.hash_grid.m_grid_cell_min_size, 0.1, 0.5))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"The minimum size of a grid cell in world space units\");\n\n\t\t\tstatic bool include_normals_in_hash = ReGIR_HashGridHashSurfaceNormal;\n\t\t\tif (ImGui::Checkbox(\"Use surface normal in hash\", &include_normals_in_hash))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL, include_normals_in_hash ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use the surface normal in the hash function of the hash grid. Increases quality but also significantly increases memory usage\");\n\t\t\tif (include_normals_in_hash)\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"ReGIR surface normal discretization tree\");\n\n\t\t\t\tstatic int normal_discretization_precision = ReGIR_HashGridHashSurfaceNormalResolutionPrimaryHits;\n\t\t\t\tImGui::SliderInt(\"Precision 1st hits\", &normal_discretization_precision, 2, 4);\n\t\t\t\tImGuiRenderer::show_help_marker(\"Higher values mean more precision for the discretization but also more computational and VRAM usage for filling the grid as well as a potentially decreased spatial reuse efficiency.\");\n\n\t\t\t\tif (normal_discretization_precision != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Apply button ReGIR normal discretization\");\n\n\t\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_PRIMARY_HITS, normal_discretization_precision);\n\n\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tif (include_normals_in_hash)\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"ReGIR surface normal discretization 2nd hits tree\");\n\n\t\t\t\tstatic int normal_discretization_precision = ReGIR_HashGridHashSurfaceNormalResolutionSecondaryHits;\n\t\t\t\tImGui::SliderInt(\"Precision 2nd hits\", &normal_discretization_precision, 2, 4);\n\t\t\t\tImGuiRenderer::show_help_marker(\"Higher values mean more precision for the discretization 
but also more computational and VRAM usage for filling the grid as well as a potentially decreased spatial reuse efficiency.\");\n\n\t\t\t\tif (normal_discretization_precision != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Apply button ReGIR normal discretization\");\n\n\t\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_HASH_SURFACE_NORMAL_RESOLUTION_SECONDARY_HITS, normal_discretization_precision);\n\n\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tstatic bool adaptive_roughness_grid_precision = ReGIR_HashGridAdaptiveRoughnessGridPrecision;\n\t\t\tif (ImGui::Checkbox(\"Adaptive roughness grid precision\", &adaptive_roughness_grid_precision))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_ADAPTIVE_ROUGHNESS_GRID_PRECISION, adaptive_roughness_grid_precision ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::SeparatorText(\"Collision resolution\");\n\t\t\tconst char* items[] = { \"- Linear probing\", \"- Rehashing\" };\n\t\t\tconst char* tooltips[] = {\n\t\t\t\t\"If a collision is found, look up the next index in the hash\\n\"\n\t\t\t\t\"table and see if that location is empty. If not empty, continue\\n\"\n\t\t\t\t\"looking at the next location up to the maximum number of steps' times.\",\n\n\t\t\t\t\"If a collision is found, hash the current cell index to get the\\n\"\n\t\t\t\t\"new candidate location. 
Continue doing so until an empty location\\n\"\n\t\t\t\t\"is found or the maximum number of steps is exceeded.\",\n\t\t\t};\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"Mode\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MODE), items, IM_ARRAYSIZE(items), tooltips))\n\t\t\t{\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tstatic int linear_probing_steps = ReGIR_HashGridCollisionResolutionMaxSteps;\n\t\t\tImGui::SliderInt(\"Max. steps\", &linear_probing_steps, 1, 32);\n\t\t\tif (linear_probing_steps != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"ReGIR linear probing steps apply button\");\n\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::REGIR_HASH_GRID_COLLISION_RESOLUTION_MAX_STEPS, linear_probing_steps);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t}\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Performance\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR Performance tree\");\n\n\t\t\tif (ImGui::Checkbox(\"Do asynchronous compute\", &regir_settings.do_asynchronous_compute))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t\tif (ImGui::CollapsingHeader(\"Debug\"))\n\t\t{\n\t\t\tImGui::TreePush(\"ReGIR Settings debug tree\");\n\n\t\t\tint regir_debug_mode = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::REGIR_DEBUG_MODE);\n\t\t\tconst char* items[] = { \"- No debug\", \"- Grid cells\", \"- Average non-canonical cell-reservoirs contrib\", \"- Average canonical cell-reservoirs contrib\", \"- Cell 
representative points\", \"- Cell representative normals\", \"- Sampling fallback\"};\n\t\t\tif (ImGui::Combo(\"Debug mode\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::REGIR_DEBUG_MODE), items, IM_ARRAYSIZE(items)))\n\t\t\t{\n\t\t\t\tif (regir_debug_mode == REGIR_DEBUG_MODE_REPRESENTATIVE_POINTS)\n\t\t\t\t\t// Auto settings this to arbitrary 0.1f to help with visualization\n\t\t\t\t\tregir_settings.debug_view_scale_factor = 0.1f;\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tif (regir_debug_mode == REGIR_DEBUG_MODE_AVERAGE_CELL_NON_CANONICAL_RESERVOIR_CONTRIBUTION || regir_debug_mode == REGIR_DEBUG_MODE_AVERAGE_CELL_CANONICAL_RESERVOIR_CONTRIBUTION)\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Debug view scale factor\", &regir_settings.debug_view_scale_factor, 0.0f, 5.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\telse if (regir_debug_mode == REGIR_DEBUG_MODE_REPRESENTATIVE_POINTS)\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Distance to point\", &regir_settings.debug_view_scale_factor, 0.0f, 1.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::TreePop();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n}\n\ntemplate <bool IsReSTIRGI>\nvoid ImGuiSettingsWindow::draw_ReSTIR_neighbor_heuristics_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tReSTIRCommonSettings& common_settings = [&render_settings] {\n\t\tif constexpr (IsReSTIRGI)\n\t\t\treturn std::ref(render_settings.restir_gi_settings);\n\t\telse \n\t\t\treturn std::ref(render_settings.restir_di_settings);\n\t}();\n\n\tstatic bool use_heuristics_at_all = true;\n\tstatic bool use_normal_heuristic_backup = common_settings.neighbor_similarity_settings.use_normal_similarity_heuristic;\n\tstatic bool use_plane_distance_heuristic_backup = 
common_settings.neighbor_similarity_settings.use_plane_distance_heuristic;\n\tstatic bool use_roughness_heuristic_backup = common_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic;\n\n\t// For ReSTIR GI only\n\tstatic bool use_neighbor_sample_point_roughness_heuristic_backup = render_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic;\n\tstatic bool use_jacobian_heuristic_backup = render_settings.restir_gi_settings.use_jacobian_rejection_heuristic;\n\n\tif (ImGui::Checkbox(\"Use Heuristics for neighbor rejection\", &use_heuristics_at_all))\n\t{\n\t\tif (!use_heuristics_at_all)\n\t\t{\n\t\t\t// Saving the usage of the heuristics for later restoration\n\t\t\tuse_normal_heuristic_backup = common_settings.neighbor_similarity_settings.use_normal_similarity_heuristic;\n\t\t\tuse_plane_distance_heuristic_backup = common_settings.neighbor_similarity_settings.use_plane_distance_heuristic;\n\t\t\tuse_roughness_heuristic_backup = common_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic;\n\n\t\t\tcommon_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = false;\n\t\t\tcommon_settings.neighbor_similarity_settings.use_plane_distance_heuristic = false;\n\t\t\tcommon_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = false;\n\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\t// Only disabling the jacobian heuristic if this is the ReSTIR GI Imgui interface\n\t\t\t\tuse_jacobian_heuristic_backup = render_settings.restir_gi_settings.use_jacobian_rejection_heuristic;\n\t\t\t\tuse_neighbor_sample_point_roughness_heuristic_backup = render_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic;\n\n\t\t\t\trender_settings.restir_gi_settings.use_jacobian_rejection_heuristic = false;\n\t\t\t\trender_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic = false;\n\t\t\t}\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Restoring heuristics usage to their 
backup values\n\t\t\tcommon_settings.neighbor_similarity_settings.use_normal_similarity_heuristic = use_normal_heuristic_backup;\n\t\t\tcommon_settings.neighbor_similarity_settings.use_plane_distance_heuristic = use_plane_distance_heuristic_backup;\n\t\t\tcommon_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic = use_roughness_heuristic_backup;\n\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t{\n\t\t\t\trender_settings.restir_gi_settings.use_jacobian_rejection_heuristic = use_jacobian_heuristic_backup;\n\t\t\t\trender_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic = use_neighbor_sample_point_roughness_heuristic_backup;\n\t\t\t}\n\t\t}\n\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\tImGuiRenderer::show_help_marker(\"Using heuristics to reject neighbor that are too dissimilar (in \"\n\t\t\"terms of normal orientation/roughnes/... to the pixel doing the resampling \"\n\t\t\"can help reduce variance. It also reduces bias but never removes it \"\n\t\t\"completely, it just makes it less obvious.\");\n\n\tif (use_heuristics_at_all)\n\t{\n\t\tImGui::TreePush(\"ReSTIR Heursitics Tree\");\n\t\tImGui::Dummy(ImVec2(0.0f, 10.0f));\n\n\n\t\t{\n\t\t\tif (ImGui::Checkbox(\"Use normal similarity heuristic\", &common_settings.neighbor_similarity_settings.use_normal_similarity_heuristic))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::TreePush(\"Normal similarity heuristic tree\");\n\t\t\tif (ImGui::Checkbox(\"Use geometric normals\", &common_settings.neighbor_similarity_settings.reject_using_geometric_normals))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tif (common_settings.neighbor_similarity_settings.use_normal_similarity_heuristic)\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Angle threshold\", &common_settings.neighbor_similarity_settings.normal_similarity_angle_degrees, 0.1f, 90.0f, \"%.3f deg\", 
ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t{\n\t\t\t\t\tcommon_settings.neighbor_similarity_settings.normal_similarity_angle_precomp = std::cos(common_settings.neighbor_similarity_settings.normal_similarity_angle_degrees * M_PI / 180.0f);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t}\n\t\t\tImGui::TreePop();\n\t\t}\n\t\tImGui::Dummy(ImVec2(0.0f, 10.0f));\n\n\n\t\t{\n\t\t\tif (ImGui::Checkbox(\"Use plane distance heuristic\", &common_settings.neighbor_similarity_settings.use_plane_distance_heuristic))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::TreePush(\"Plane distance heuristic tree\");\n\t\t\tif (common_settings.neighbor_similarity_settings.use_plane_distance_heuristic)\n\t\t\t\tif (ImGui::SliderFloat(\"Distance threshold\", &common_settings.neighbor_similarity_settings.plane_distance_threshold, 0.0f, 1.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::TreePop();\n\t\t}\n\t\tImGui::Dummy(ImVec2(0.0f, 10.0f));\n\n\n\t\t{\n\t\t\tif (ImGui::Checkbox(\"Use roughness heuristic\", &common_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::TreePush(\"Roughness heuristic tree\");\n\t\t\tif (common_settings.neighbor_similarity_settings.use_roughness_similarity_heuristic)\n\t\t\t\tif (ImGui::SliderFloat(\"Roughness threshold\", &common_settings.neighbor_similarity_settings.roughness_similarity_threshold, 0.0f, 1.0f, \"%.3f\", ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::TreePop();\n\t\t}\n\n\n\t\tif constexpr (IsReSTIRGI)\n\t\t{\n\t\t\tImGui::Dummy(ImVec2(0.0f, 10.0f));\n\t\t\tif (ImGui::Checkbox(\"Use jacobian heuristic\", &render_settings.restir_gi_settings.use_jacobian_rejection_heuristic))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::TreePush(\"Jacobian heuristic tree\");\n\t\t\tif 
(render_settings.restir_gi_settings.use_jacobian_rejection_heuristic)\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Jacobian threshold\", render_settings.restir_gi_settings.get_jacobian_heuristic_threshold_pointer(), 5.0f, 100.0f))\n\t\t\t\t{\n\t\t\t\t\trender_settings.restir_gi_settings.set_jacobian_heuristic_threshold(hippt::max(1.001f, render_settings.restir_gi_settings.get_jacobian_heuristic_threshold()));\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t}\n\t\t\tImGui::TreePop();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 10.0f));\n\n\t\t\tif (ImGui::Checkbox(\"Use sample point roughness heuristic\", &render_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGuiRenderer::show_help_marker(\"If the roughness of the neighbor's sample point is lower than this threshold, the neighbor \"\n\t\t\t\t\"won't be reused\\n\"\n\t\t\t\t\"If the neighbor's sample point's roughness is higher than the threshold, it can be reused.\\n\"\n\t\t\t\t\"This is pretty much necessary to avoid \\\"bias\\\" (although this isn't stricly bias, more like extremely \"\n\t\t\t\t\"high variance) when the primary hit (visible point) is on a rough surface and the secondary hit (sample point) is on a \"\n\t\t\t\t\"specular surface: a rough primary hit bouncing into a window / mirror for example.\");\n\n\t\t\tImGui::TreePush(\"Sample point roughness heuristic tree\");\n\t\t\tif (render_settings.restir_gi_settings.use_neighbor_sample_point_roughness_heuristic)\n\t\t\t\tif (ImGui::SliderFloat(\"Min. 
neighbor roughness\", &render_settings.restir_gi_settings.neighbor_sample_point_roughness_threshold, 0.0f, 1.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::TreePop();\n\t\t}\n\t\t// ReSTIR DI Heursitics Tree\n\t\tImGui::TreePop();\n\t}\n}\n\ntemplate <bool IsReSTIRGI>\nvoid ImGuiSettingsWindow::draw_ReSTIR_temporal_reuse_panel(std::function<void(void)> draw_before_panel)\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tReSTIRCommonTemporalPassSettings& restir_common_temporal_settings = IsReSTIRGI ? m_renderer->get_render_settings().restir_gi_settings.common_temporal_pass : m_renderer->get_render_settings().restir_di_settings.common_temporal_pass;\n\n\tif (ImGui::CollapsingHeader(\"Temporal Reuse Pass\"))\n\t{\n\t\tImGui::PushID(&restir_common_temporal_settings);\n\t\tImGui::TreePush(\"ReSTIR - Temporal Reuse Pass Tree\");\n\t\t{\n\t\t\tdraw_before_panel();\n\n\t\t\tif (restir_common_temporal_settings.do_temporal_reuse_pass)\n\t\t\t{\n\t\t\t\t// Same line as \"Do Temporal Reuse\"\n\t\t\t\tImGui::SameLine();\n\t\t\t\tif (ImGui::Button(\"Reset Temporal Reservoirs\"))\n\t\t\t\t{\n\t\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t\t\tm_renderer->get_ReSTIR_GI_render_pass()->request_temporal_bufffers_clear();\n\t\t\t\t\telse\n\t\t\t\t\t\tm_renderer->get_ReSTIR_DI_render_pass()->request_temporal_bufffers_clear();\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tbool last_frame_g_buffer_needed = true;\n\t\t\t\tlast_frame_g_buffer_needed &= !render_settings.accumulate;\n\t\t\t\tlast_frame_g_buffer_needed &= restir_common_temporal_settings.do_temporal_reuse_pass;\n\n\t\t\t\tif (ImGui::SliderInt(\"Max temporal neighbor search count\", &restir_common_temporal_settings.max_neighbor_search_count, 0, 16))\n\t\t\t\t{\n\t\t\t\t\t// Clamping\n\t\t\t\t\trestir_common_temporal_settings.max_neighbor_search_count = std::max(0, 
restir_common_temporal_settings.max_neighbor_search_count);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tif (ImGui::SliderInt(\"Temporal neighbor search radius\", &restir_common_temporal_settings.neighbor_search_radius, 0, 16))\n\t\t\t\t{\n\t\t\t\t\t// Clamping\n\t\t\t\t\trestir_common_temporal_settings.neighbor_search_radius = std::max(0, restir_common_temporal_settings.neighbor_search_radius);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tif (ImGui::Checkbox(\"Use Permutation Sampling\", &restir_common_temporal_settings.use_permutation_sampling))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"If true, the back-projected position of the current pixel (temporal neighbor position) will be shuffled\"\n\t\t\t\t\t\" to add temporal variations.\");\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tint& m_cap = IsReSTIRGI ? m_renderer->get_render_settings().restir_gi_settings.m_cap : m_renderer->get_render_settings().restir_di_settings.m_cap;\n\t\t\t\tif (ImGui::SliderInt(\"M-cap\", &m_cap, 0, 255, \"%d\", ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t{\n\t\t\t\t\tm_cap = std::max(0, m_cap);\n\t\t\t\t\tif (render_settings.accumulate)\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t\tImGui::PopID();\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\t}\n}\n\ntemplate <bool IsReSTIRGI>\nvoid ImGuiSettingsWindow::draw_ReSTIR_spatial_reuse_panel(std::function<void(void)> draw_before_panel)\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tReSTIRCommonSpatialPassSettings& restir_settings = IsReSTIRGI ? 
render_settings.restir_gi_settings.common_spatial_pass : render_settings.restir_di_settings.common_spatial_pass;\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tif (ImGui::CollapsingHeader(\"Spatial Reuse Pass\"))\n\t{\n\t\tImGui::PushID(&restir_settings);\n\t\tImGui::TreePush(\"ReSTIR - Spatial Reuse Pass Tree\");\n\t\t{\n\t\t\tdraw_before_panel();\n\n\t\t\tif (restir_settings.do_spatial_reuse_pass)\n\t\t\t{\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tbool use_spatial_target_function_visibility;\n\t\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\t\tuse_spatial_target_function_visibility = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY);\n\t\t\t\telse\n\t\t\t\t\tuse_spatial_target_function_visibility = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY);\n\t\t\t\tif (ImGui::Checkbox(\"Use visibility in target function\", &use_spatial_target_function_visibility))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(IsReSTIRGI ? GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_TARGET_FUNCTION_VISIBILITY : GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY, use_spatial_target_function_visibility ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to use the visibility term in the target function used for \"\n\t\t\t\t\t\"resampling spatial neighbors.\");\n\n\t\t\t\tint max_neighbor_count = restir_settings.reuse_neighbor_count;\n\t\t\t\tif (restir_settings.do_disocclusion_reuse_boost)\n\t\t\t\t\tmax_neighbor_count = std::max(max_neighbor_count, restir_settings.disocclusion_reuse_count);\n\t\t\t\tstatic int partial_visibility_neighbor_count = max_neighbor_count;\n\t\t\t\tif (use_spatial_target_function_visibility)\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"VisibilitySpatialReuseLastPassOnly Tree\");\n\n\t\t\t\t\t{\n\t\t\t\t\t\tif (ImGui::SliderInt(\"Partial neighbor visibility\", &partial_visibility_neighbor_count, 0, max_neighbor_count, \"%d\", ImGuiSliderFlags_AlwaysClamp))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t// Using -1 so that the user manipulates intuitive numbers between 0 and\n\t\t\t\t\t\t\t// 'restir_settings.reuse_neighbor_count'\n\t\t\t\t\t\t\t// but the shader actually wants value between -1 and\n\t\t\t\t\t\t\t// 'restir_settings.reuse_neighbor_count' for it to be meaningful\n\t\t\t\t\t\t\trestir_settings.neighbor_visibility_count = partial_visibility_neighbor_count;\n\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t}\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"How many neighbors will actually use a visibility term, can be useful to balance \"\n\t\t\t\t\t\t\t\"performance/variance but lowering this value below the maximum amount of neighbors may actually reduce \"\n\t\t\t\t\t\t\t\"performance because the final shading pass will have more visibility tests to do: if all neighbors use \"\n\t\t\t\t\t\t\t\"visibility during spatial resampling, then the final shading pass can be certain that all neighbors \"\n\t\t\t\t\t\t\t\"already take occlusion into account and so the final shading 
pass doesn't compute visibility. \"\n\t\t\t\t\t\t\t\"However, if 1 or 2 neighbors do not include visibility for example, then the final shading pass will \"\n\t\t\t\t\t\t\t\"have to trace rays for these neighbors and this will slow down the final shading pass quite a bit.\");\n\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Only on the last pass\", &restir_settings.do_visibility_only_last_pass))\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, the visibility in the resampling target function will only be used on the last spatial reuse pass\");\n\t\t\t\t\t}\n\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\t\t\t\tstatic bool do_optimal_vis_sampling = IsReSTIRGI ? ReSTIR_GI_DoOptimalVisibilitySampling : ReSTIR_DI_DoOptimalVisibilitySampling;\n\t\t\t\tif (ImGui::Checkbox(\"Do optimal visibility sampling\", &do_optimal_vis_sampling))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(IsReSTIRGI ? GPUKernelCompilerOptions::RESTIR_GI_DO_OPTIMAL_VISIBILITY_SAMPLING : GPUKernelCompilerOptions::RESTIR_DI_DO_OPTIMAL_VISIBILITY_SAMPLING, do_optimal_vis_sampling? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tif (ImGui::SliderInt(\"Spatial reuse pass count\", &restir_settings.number_of_passes, 1, 8))\n\t\t\t\t{\n\t\t\t\t\t// Clamping\n\t\t\t\t\trestir_settings.number_of_passes = std::max(1, restir_settings.number_of_passes);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\t// Checking the value before the \"Neighbor Reuse Count\" slider is modified\n\t\t\t\t// so that we know whether or not we'll have to keep the\n\t\t\t\t// 'partial_visibility_neighbor_count' value updated for the \"Partial Neighbor Visibility\" slider\n\t\t\t\tbool will_need_to_update_partial_visibility = partial_visibility_neighbor_count == max_neighbor_count;\n\t\t\t\tif (ImGui::SliderInt(\"Neighbor reuse count\", &restir_settings.reuse_neighbor_count, 0, 16))\n\t\t\t\t{\n\t\t\t\t\t// Updating the maximum\n\t\t\t\t\tmax_neighbor_count = restir_settings.reuse_neighbor_count;\n\t\t\t\t\tif (restir_settings.do_disocclusion_reuse_boost)\n\t\t\t\t\t\tmax_neighbor_count = std::max(max_neighbor_count, restir_settings.disocclusion_reuse_count);\n\n\t\t\t\t\tbool reuse_count_is_the_max = max_neighbor_count == restir_settings.reuse_neighbor_count;\n\t\t\t\t\treuse_count_is_the_max |= !restir_settings.do_disocclusion_reuse_boost;\n\t\t\t\t\tif (will_need_to_update_partial_visibility && reuse_count_is_the_max)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Also updating the partial visibility neighbor index slider if it was set to the maximum\n\t\t\t\t\t\t// amount of neighbors\n\t\t\t\t\t\tpartial_visibility_neighbor_count = restir_settings.reuse_neighbor_count;\n\t\t\t\t\t\trestir_settings.neighbor_visibility_count = partial_visibility_neighbor_count;\n\t\t\t\t\t}\n\n\t\t\t\t\tif (restir_settings.disocclusion_reuse_count < restir_settings.reuse_neighbor_count)\n\t\t\t\t\t\t// If disocclusion boost 
is now below the spatial neighbor count, bumping it up\n\t\t\t\t\t\t// because it makes no sense to have the disocclusion boost below the base\n\t\t\t\t\t\t// spatial neighbor count\n\t\t\t\t\t\trestir_settings.disocclusion_reuse_count = restir_settings.reuse_neighbor_count;\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tstd::string spatial_reuse_radius_text = restir_settings.use_adaptive_directional_spatial_reuse ? \"Max reuse radius (px)\" : \"Reuse radius (px)\";\n\t\t\t\tif (ImGui::SliderInt(spatial_reuse_radius_text.c_str(), &restir_settings.reuse_radius, 0, 64))\n\t\t\t\t{\n\t\t\t\t\trestir_settings.auto_reuse_radius = false;\n\n\t\t\t\t\tif (!restir_settings.debug_neighbor_location)\n\t\t\t\t\t\t// Clamping if not debugging (we do allow negative values when debugging)\n\t\t\t\t\t\trestir_settings.reuse_radius = std::max(0, restir_settings.reuse_radius);\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGui::SameLine();\n\t\t\t\tif (ImGui::Checkbox(\"Auto\", &restir_settings.auto_reuse_radius))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"Automatically determines the spatial reuse radius (or maximum spatial reuse radius if using \"\n\t\t\t\t\t\"\\\"adaptive-directional spatial reuse\\\") to use based on the render resolution.\");\n\n\t\t\t\tif (ImGui::CollapsingHeader(\"Directional spatial reuse\"))\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Directional spatial reuse tree\");\n\n\t\t\t\t\tif (!render_settings.accumulate)\n\t\t\t\t\t{\n\t\t\t\t\t\tImGuiRenderer::add_warning(\"Disabled because not accumulating\");\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::BeginDisabled(!render_settings.accumulate);\n\n\t\t\t\t\tif (ImGui::Checkbox(\"Use adaptive-directional spatial reuse\", 
&restir_settings.use_adaptive_directional_spatial_reuse))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Precomputes the best per-pixel spatial reuse radius to use as \"\n\t\t\t\t\t\t\"well as the sectors in the spatial reuse disk (split in 32 sectors) that should be used for reuse.\\n\\n\"\n\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\"This increases the spatial reuse \\\"hit rate\\\" (i.e. the number of neighbors that are not rejected by \"\n\t\t\t\t\t\t\"G-Buffer heuristics) and thus increases convergence speed.\\n\\n\"\n\t\t\t\t\t\t\"\"\n\t\t\t\t\t\t\"Has no effect if not accumulating.\");\n\n\t\t\t\t\tif (restir_settings.use_adaptive_directional_spatial_reuse)\t\n\t\t\t\t\t{\n\t\t\t\t\t\tif (ImGui::SliderInt(\"Minimum reuse radius (px)\", &restir_settings.minimum_per_pixel_reuse_radius, 0, restir_settings.reuse_radius))\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"The minimum radius that will be used per pixel when the optimal per-pixel spatial reuse \"\n\t\t\t\t\t\t\t\"radius is computed by \\\"adaptive-directional spatial reuse\\\"\");\n\n\n\n\t\t\t\t\t\tbool bitcount_changed = false;\n\t\t\t\t\t\tstatic int spatial_reuse_directional_masks_bitcount = IsReSTIRGI ? ReSTIR_GI_SpatialDirectionalReuseBitCount : ReSTIR_DI_SpatialDirectionalReuseBitCount;\n\t\t\t\t\t\tbitcount_changed |= ImGui::RadioButton(\"32 Bits\", &spatial_reuse_directional_masks_bitcount, 32); ImGui::SameLine();\n\t\t\t\t\t\tbitcount_changed |= ImGui::RadioButton(\"64 Bits\", &spatial_reuse_directional_masks_bitcount, 64);\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"How many bits to use for the directional spatial reuse bit masks.\\n\"\n\t\t\t\t\t\t\t\"More bits yields more precise result but use a little bit more VRAM.\");\n\t\t\t\t\t\tif (bitcount_changed)\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tglobal_kernel_options->set_macro_value(IsReSTIRGI ? 
GPUKernelCompilerOptions::RESTIR_GI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT : GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_DIRECTIONAL_REUSE_MASK_BIT_COUNT, spatial_reuse_directional_masks_bitcount);\n\t\t\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t\tif (ImGui::Checkbox(\"Compute spatial reuse hit rate\", &restir_settings.compute_spatial_reuse_hit_rate))\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::EndDisabled();\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\tif (ImGui::Checkbox(\"Coalesced spatial reuse\", &restir_settings.coalesced_spatial_reuse))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"Reuses the same random numbers for all the pixels in the \"\n\t\t\t\t\t\"image for picking the spatial neighbors such that memory accesses to surface \"\n\t\t\t\t\t\"data / reservoirs are coalesced\");\n\n\t\t\t\tif (ImGui::Checkbox(\"Increase disocclusion reuse count\", &restir_settings.do_disocclusion_reuse_boost))\n\t\t\t\t{\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tif (restir_settings.do_disocclusion_reuse_boost)\n\t\t\t\t\t{\n\t\t\t\t\t\t// We just enabled disocclusion boost\n\n\t\t\t\t\t\t// Recomputing the max neighbor with the disocclusion boost taken into account\n\t\t\t\t\t\tmax_neighbor_count = std::max(max_neighbor_count, restir_settings.disocclusion_reuse_count);\n\n\t\t\t\t\t\tpartial_visibility_neighbor_count = max_neighbor_count;\n\t\t\t\t\t}\n\t\t\t\t\telse\n\t\t\t\t\t\t// Disabled disocclusion boost, bringing the value back to its maximum before\n\t\t\t\t\t\t// disocclusion boost which is just the number of reused spatial neighbors\n\t\t\t\t\t\tpartial_visibility_neighbor_count = 
restir_settings.reuse_neighbor_count;\n\n\t\t\t\t\trestir_settings.neighbor_visibility_count = partial_visibility_neighbor_count;\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, the given number of neighbors will be reused for pixels that just got \"\n\t\t\t\t\t\"disoccluded due to camera movement (and thus that have no temporal history). This helps \"\n\t\t\t\t\t\"reduce noise in disoccluded regions.\");\n\t\t\t\tif (restir_settings.do_disocclusion_reuse_boost)\n\t\t\t\t{\n\t\t\t\t\t{\n\t\t\t\t\t\tImGui::TreePush(\"Disocclusion boost tree\");\n\n\t\t\t\t\t\tif (ImGui::SliderInt(\"Disoccluded Neighbor Reuse Count\", &restir_settings.disocclusion_reuse_count, restir_settings.reuse_neighbor_count, 16 + restir_settings.reuse_neighbor_count))\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t\t\t// Updating the maximum\n\t\t\t\t\t\t\tmax_neighbor_count = restir_settings.reuse_neighbor_count;\n\t\t\t\t\t\t\tif (restir_settings.do_disocclusion_reuse_boost)\n\t\t\t\t\t\t\t\tmax_neighbor_count = std::max(max_neighbor_count, restir_settings.disocclusion_reuse_count);\n\n\t\t\t\t\t\t\tif (will_need_to_update_partial_visibility)\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t// If the number of neighbors using visibility is set at the maximum, then we should\n\t\t\t\t\t\t\t\t// keep that value at the maximum as we modify the disoccluded neighbor reuse count\n\t\t\t\t\t\t\t\tmax_neighbor_count = restir_settings.disocclusion_reuse_count;\n\t\t\t\t\t\t\t\tpartial_visibility_neighbor_count = max_neighbor_count;\n\t\t\t\t\t\t\t\trestir_settings.neighbor_visibility_count = max_neighbor_count;\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\tImGuiRenderer::show_help_marker(\"How many neighbors a pixel will reuse if that pixel just got disoccluded.\");\n\n\t\t\t\t\t\tif (restir_settings.neighbor_visibility_count == restir_settings.reuse_neighbor_count)\n\t\t\t\t\t\t\t// If the user is using the visibility in the target function of all spatial 
neighbors,\n\t\t\t\t\t\t\t// modifying that maximum number should still keep the visibility target function count\n\t\t\t\t\t\t\t// to the maximum\n\t\t\t\t\t\t\trestir_settings.neighbor_visibility_count = std::max(restir_settings.disocclusion_reuse_count, restir_settings.reuse_neighbor_count);\n\n\t\t\t\t\t\tImGui::TreePop();\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tImGui::BeginDisabled(restir_settings.use_adaptive_directional_spatial_reuse);\n\n\t\t\t\tif (ImGui::Checkbox(\"Spatial neighbors random rotation\", &restir_settings.do_neighbor_rotation))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"If checked, spatial neighbors sampled (using the Hammersley point set) \"\n\t\t\t\t\t\"will be randomly rotated. Because neighbor locations are generated with a Hammersley point set \"\n\t\t\t\t\t\"(deterministic), not rotating them results in every pixel of every rendered image reusing the \"\n\t\t\t\t\t\"same neighbor locations which decreases reuse efficiency.\");\n\t\t\t\tImGui::EndDisabled();\n\n\t\t\t\tImGui::BeginDisabled(!render_settings.enable_adaptive_sampling);\n\t\t\t\tif (ImGui::Checkbox(\"Allow reuse of converged neighbors\", &restir_settings.allow_converged_neighbors_reuse))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tstd::string reuse_of_converged_neighbors_help = \"If checked, then the spatial reuse passes are allowed \"\n\t\t\t\t\t\"to reuse from neighboring pixels which have converged (and thus neighbors that \"\n\t\t\t\t\t\"are not being sampled anymore = neighbors whose reservoirs do not evolve anymore). \"\n\t\t\t\t\t\"This improves performance but at the cost of bias when non-converged \"\n\t\t\t\t\t\"pixels try to reuse from converged pixels. 
The bias will thus typically manifest \"\n\t\t\t\t\t\"on the parts of the image that are the hardest to render.\";\n\t\t\t\tif (!render_settings.enable_adaptive_sampling)\n\t\t\t\t\treuse_of_converged_neighbors_help += \"\\n\\nDisabled because adaptive sampling isn't enabled.\";\n\t\t\t\tImGuiRenderer::show_help_marker(reuse_of_converged_neighbors_help);\n\t\t\t\tif (restir_settings.allow_converged_neighbors_reuse)\n\t\t\t\t{\n\t\t\t\t\tif (ImGui::SliderFloat(\"Converged Neighbor Reuse Probability\", &restir_settings.converged_neighbor_reuse_probability, 0.0f, 1.0f))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\tImGuiRenderer::show_help_marker(\"Allows trading bias for rendering performance by \"\n\t\t\t\t\t\t\"spatially reusing converged neighbors only with a certain probability instead of never / always.\"\n\t\t\t\t\t\t\"\\n\\n 0.0 nevers reuses converged neighbors. No bias but performance impact.\"\n\t\t\t\t\t\t\"\\n\\n 1.0 always reuses converged neighbors. Biased but no performance impact.\");\n\t\t\t\t}\n\t\t\t\tImGui::EndDisabled();\n\n\t\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to gather statistics on the hit rate of the spatial reuse \"\n\t\t\t\t\t\"pass (i.e. how many neighbors are rejected because of the G-Buffer heuristics vs. 
the maximum number \"\n\t\t\t\t\t\"of neighbors that can be reused).\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"This is mainly useful to evaluate the effectiveness of the \\\"adaptive-directional spatial reuse\\\".\\n\"\n\t\t\t\t\t\"Note that this isn't great for performance.\");\n\t\t\t\tif (restir_settings.compute_spatial_reuse_hit_rate)\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Spatial reuse hit rate statistics\");\n\n\t\t\t\t\tif (restir_settings.spatial_reuse_hit_rate_total != nullptr)\n\t\t\t\t\t{\n\t\t\t\t\t\t// Making sure that the buffers are indeed allocated\n\n\t\t\t\t\t\tunsigned long long int spatial_reuse_total = OrochiBuffer<unsigned long long int>::download_data(reinterpret_cast<unsigned long long int*>(restir_settings.spatial_reuse_hit_rate_total), 1)[0];\n\t\t\t\t\t\tunsigned long long int spatial_reuse_hit = OrochiBuffer<unsigned long long int>::download_data(reinterpret_cast<unsigned long long int*>(restir_settings.spatial_reuse_hit_rate_hits), 1)[0];\n\n\t\t\t\t\t\tImGui::Text(\"Hit rate: %f\", spatial_reuse_hit * 100.0f / spatial_reuse_total);\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tImGui::TreePop();\n\t\tImGui::PopID();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n}\n\ntemplate <bool IsReSTIRGI>\nvoid ImGuiSettingsWindow::draw_ReSTIR_bias_correction_panel()\n{\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\tReSTIRCommonSettings* restir_settings;\n\tif constexpr (IsReSTIRGI)\n\t\trestir_settings = &m_renderer->get_render_settings().restir_gi_settings;\n\telse\n\t\trestir_settings = &m_renderer->get_render_settings().restir_di_settings;\n\n\tif (ImGui::CollapsingHeader(\"Bias correction\"))\n\t{\n\t\tImGui::PushID(restir_settings);\n\t\tImGui::TreePush(\"Bias correction tree ReSTIR\");\n\n\t\t{\n\t\t\tconst char* bias_correction_mode_items[] = {\n\t\t\t\t\"- 1/M (Biased)\",\n\t\t\t\t\"- 1/Z\",\n\t\t\t\t\"- MIS-like\",\n\t\t\t\t\"- Generalized 
balance heuristic\",\n\t\t\t\t\"- Pairwise MIS\",\n\t\t\t\t\"- Pairwise MIS defensive\",\n\t\t\t\t\"- Pairwise symmetric ratio\",\n\t\t\t\t\"- Pairwise asymmetric ratio\",\n\t\t\t};\n\n\t\t\tconst char* tooltips[] = {\n\t\t\t\t\"Very simple biased weights as described in the 2020 ReSTIR DI paper(Eq. 6).\\n\"\n\t\t\t\t\"Those weights are biased because they do not account for cases where \"\n\t\t\t\t\"we resample a sample that couldn't have been produced by some neighbors.\\n\"\n\t\t\t\t\"The bias shows up as darkening, mostly at object boundaries. In GRIS vocabulary, \"\n\t\t\t\t\"this type of weights can be seen as confidence weights alone c_i / sum(c_j).\",\n\n\t\t\t\t\"Simple unbiased weights as described in the 2020 ReSTIR paper (Eq. 16 and Section 4.3).\\n\"\n\t\t\t\t\"Those weights are unbiased but can have * *extremely * *bad variance when a neighbor being resampled \"\n\t\t\t\t\"has a very low target function(when the neighbor is a glossy surface for example).\\n\"\n\t\t\t\t\"See Fig. 7 of the 2020 paper.\",\n\n\t\t\t\t\"Unbiased weights as proposed by Eq. 22 of the paper.Way better than 1 / Z in terms of variance \"\n\t\t\t\t\"and still unbiased.\",\n\n\t\t\t\t\"Unbiased MIS weights that use the generalized balance heuristic. Very good variance reduction but O(N ^ 2) complexity, \"\n\t\t\t\t\"N being the number of neighbors resampled.\\n\"\n\t\t\t\t\"Eq. 
36 of the 2022 Generalized Resampled Importance Sampling paper.\",\n\n\t\t\t\t\"Similar variance reduction to the generalized balance heuristic and only O(N) computational cost.\\n\"\n\t\t\t\t\"Section 7.1.3 of \\\"A Gentle Introduction to ReSTIR\\\", 2023\",\n\n\t\t\t\t\"Similar variance reduction to the generalized balance heuristic and only O(N) computational cost.\\n\"\n\t\t\t\t\"Section 7.1.3 of \\\"A Gentle Introduction to ReSTIR\\\", 2023\",\n\n\t\t\t\t\"A bit more variance than pairwise MIS but way more robust to temporal correlations.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Implementation of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\",\n\n\t\t\t\t\"A bit more variance than pairwise MIS but way more robust to temporal correlations.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Implementation of [Enhancing Spatiotemporal Resampling with a Novel MIS Weight, Pan et al., 2024]\"\n\t\t\t};\n\n\t\t\tint* bias_correction_weights_option_pointer = global_kernel_options->get_raw_pointer_to_macro_value(IsReSTIRGI ? GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_WEIGHTS : GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS);\n\t\t\tif (ImGuiRenderer::ComboWithTooltips(\"MIS Weights\", bias_correction_weights_option_pointer, bias_correction_mode_items, IM_ARRAYSIZE(bias_correction_mode_items), tooltips))\n\t\t\t{\n\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"What weights to use to resample reservoirs\");\n\n\t\t\tbool disable_confidence_weights = *bias_correction_weights_option_pointer == (IsReSTIRGI ? RESTIR_GI_BIAS_CORRECTION_1_OVER_M : RESTIR_DI_BIAS_CORRECTION_1_OVER_M)\n\t\t\t\t\t\t\t\t\t\t   || *bias_correction_weights_option_pointer == (IsReSTIRGI ? 
RESTIR_GI_BIAS_CORRECTION_1_OVER_Z : RESTIR_DI_BIAS_CORRECTION_1_OVER_Z);\n\n\t\t\tif (*bias_correction_weights_option_pointer == RESTIR_DI_BIAS_CORRECTION_SYMMETRIC_RATIO ||\n\t\t\t\t*bias_correction_weights_option_pointer == RESTIR_DI_BIAS_CORRECTION_ASYMMETRIC_RATIO ||\n\t\t\t\t*bias_correction_weights_option_pointer == RESTIR_GI_BIAS_CORRECTION_SYMMETRIC_RATIO ||\n\t\t\t\t*bias_correction_weights_option_pointer == RESTIR_GI_BIAS_CORRECTION_ASYMMETRIC_RATIO)\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Beta exponent\", &restir_settings->symmetric_ratio_mis_weights_beta_exponent, 1.0f, 5.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t}\n\n\t\t\tImGui::BeginDisabled(disable_confidence_weights);\n\t\t\tif (ImGui::Checkbox(\"Use confidence weights\", &restir_settings->use_confidence_weights))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tstd::string confidence_weight_help_string = \"Whether or not to use confidence weights when resampling the samples. Confidence weights allow proper temporal reuse.\";\n\t\t\tif (disable_confidence_weights)\n\t\t\t\tconfidence_weight_help_string += \"\\n\\nDisabled because 1/M or 1/Z weights use confidence weights by design.\";\n\t\t\tImGuiRenderer::show_help_marker(confidence_weight_help_string);\n\t\t\tImGui::EndDisabled();\n\n\t\t\t// No visibility bias correction for 1/M weights\n\t\t\tbool bias_correction_visibility_disabled = *bias_correction_weights_option_pointer == (IsReSTIRGI ? 
RESTIR_GI_BIAS_CORRECTION_1_OVER_M : RESTIR_DI_BIAS_CORRECTION_1_OVER_M);\n\t\t\tbool bias_correction_use_visibility;\n\t\t\tif constexpr (IsReSTIRGI)\n\t\t\t\tbias_correction_use_visibility = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY);\n\t\t\telse\n\t\t\t\tbias_correction_use_visibility = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY);\n\t\t\tImGui::BeginDisabled(bias_correction_visibility_disabled);\n\t\t\tif (ImGui::Checkbox(\"Use visibility in bias correction\", &bias_correction_use_visibility))\n\t\t\t{\n\t\t\t\tint* bias_correction_use_visibility_option_pointer = global_kernel_options->get_raw_pointer_to_macro_value(IsReSTIRGI ? GPUKernelCompilerOptions::RESTIR_GI_BIAS_CORRECTION_USE_VISIBILITY : GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY);\n\t\t\t\t*bias_correction_use_visibility_option_pointer = bias_correction_use_visibility ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE;\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tif (bias_correction_visibility_disabled)\n\t\t\t\tImGuiRenderer::show_help_marker(\"Visibility bias correction cannot be used with 1/M weights.\");\n\t\t\tImGui::EndDisabled();\n\t\t}\n\n\t\tImGui::TreePop();\n\t\tImGui::PopID();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_next_event_estimation_plus_plus_panel()\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tif (ImGui::CollapsingHeader(\"Next Event Estimation++\"))\n\t{\n\t\tImGui::TreePush(\"Use NEE++ Tree\");\n\n\t\tuse_next_event_estimation_checkbox();\n\t\tImGuiRenderer::show_help_marker(\"Whether or not to use NEE++ [Guo et al., 2020] features at all.\");\n\n\t\tif 
(global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS) == KERNEL_OPTION_TRUE)\n\t\t{\n\t\t\tImGui::TreePush(\"NEE++ Settings Tree\");\n\n\t\t\tImGui::Text(\"VRAM Usage: %.3fMB\", m_renderer->get_NEE_plus_plus_render_pass()->get_vram_usage_bytes() / 1000000.0f);\n\t\t\tstatic bool display_load_factor = false;\n\t\t\tif (display_load_factor)\n\t\t\t{\n\t\t\t\tm_renderer->get_NEE_plus_plus_render_pass()->get_nee_plus_plus_storage().update_cell_alive_count();\n\t\t\t\tImGui::Text(\"Load factor: %.3f%%\", m_renderer->get_NEE_plus_plus_render_pass()->get_load_factor() * 100.0f);\n\t\t\t}\n\t\t\telse\n\t\t\t\tImGui::Text(\"Load factor: ---\");\n\t\t\tImGui::SameLine(); \n\t\t\tImGui::Checkbox(\"Display load factor\", &display_load_factor);\n\n\t\t\tif (ImGui::InputFloat(\"Max VRAM usage (MB)\", &m_renderer->get_NEE_plus_plus_render_pass()->get_max_vram_usage()))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Grid cell target projected size\", &render_data.nee_plus_plus.m_grid_cell_target_projected_size, 5, 25))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"The target screen-space size (in pixels) that a grid cell should occupy on the screen.\\n\"\n\t\t\t\t\t\"This has the effect of making the grid cells larger in the distance so that the projected size stays approximately constant.\");\n\n\t\t\t\tif (ImGui::SliderFloat(\"Grid cell minimum size\", &render_data.nee_plus_plus.m_grid_cell_min_size, 0.005, 0.5))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"The minimum size of a grid cell in world space units\");\n\n\t\t\t\tImGui::SliderInt(\"Update max samples\", &render_data.nee_plus_plus.m_stop_update_samples, 1, 96);\n\t\t\t\tImGuiRenderer::show_help_marker(\"After this many samples, the update of the visibility will automatically 
\"\n\t\t\t\t\t\"stop to save some performance because accumulating forever isn't necessary for visibility caching precision.\");\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t\tbool use_nee_plus_plus_rr = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE);\n\t\t\t\tif (ImGui::Checkbox(\"Use NEE++ Russian Roulette\", &use_nee_plus_plus_rr))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE, use_nee_plus_plus_rr ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Implementation of NEE++, [Guo et al., 2020].\\n\"\n\t\t\t\t\t\"If checked, the voxel-to-voxel visibility estimate of NEE++ will be used to \"\n\t\t\t\t\t\"stochastically determine whether or not attempt at all to trace a shadow at \"\n\t\t\t\t\t\"a light during next-event-estimation.\");\n\t\t\t\tif (global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS_RUSSIAN_ROULETTE) == KERNEL_OPTION_TRUE)\n\t\t\t\t\tImGui::Text(\"Shadow rays traced: %.3f%%\", m_renderer->get_nee_plus_plus_storage().get_shadow_rays_actually_traced_from_GPU() / (float)m_renderer->get_nee_plus_plus_storage().get_total_shadow_rays_queries_from_GPU() * 100.0f);\n\n\t\t\t\tif (use_nee_plus_plus_rr)\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"NEE++ RR options tree\");\n\n\t\t\t\t\tif (ImGui::Checkbox(\"Use NEE++ RR for emissives\", &render_data.nee_plus_plus.m_enable_nee_plus_plus_RR_for_emissives))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\tif (ImGui::Checkbox(\"Use NEE++ RR for envmap\", &render_data.nee_plus_plus.m_enable_nee_plus_plus_RR_for_envmap))\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\t\t{\n\t\t\t\t\t\tunsigned int traced = 0;\n\t\t\t\t\t\tunsigned int 
total = 0;\n\n\t\t\t\t\t\tImGui::SameLine();\n\t\t\t\t\t\tstd::string button_text = render_data.nee_plus_plus.do_update_shadow_rays_traced_statistics ? \"Stop\" : \"Resume\";\n\t\t\t\t\t\tif (ImGui::Button(button_text.c_str()))\n\t\t\t\t\t\t\trender_data.nee_plus_plus.do_update_shadow_rays_traced_statistics = !render_data.nee_plus_plus.do_update_shadow_rays_traced_statistics;\n\n\t\t\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t}\n\n\t\t\t{\n\t\t\t\tif (ImGui::SliderFloat(\"Confidence threshold\", &render_data.nee_plus_plus.m_confidence_threshold, 0.0f, 1.0f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\tImGuiRenderer::show_help_marker(\"If a voxel-to-voxel unocclusion probability is higher than that, \"\n\t\t\t\t\t\"the voxel will be considered unoccluded and so a shadow ray will be traced. This is to \"\n\t\t\t\t\t\"avoid trusting voxel that have a low probability of being unoccluded\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"0.0f basically disables NEE++ as any entry of the visibility map will require a shadow ray.\\n\\n\"\n\t\t\t\t\t\"\"\n\t\t\t\t\t\"Higher values yield higher performance but also higher variance (and the tradeoff doesn't seem \"\n\t\t\t\t\t\"worth it, hence the very low default value which means that we only allow ourselves \"\n\t\t\t\t\t\"to save shadow rays when we have a very high probability that the two voxels are occluded.\");\n\n\t\t\t\tif (ImGui::SliderFloat(\"Minimum unoccluded proba\", &render_data.nee_plus_plus.m_minimum_unoccluded_proba, 0.0f, 0.1f))\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t}\n\n\t\t\tif (ImGui::CollapsingHeader(\"Debug\"))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"NEE++ debug tree\");\n\n\t\t\t\tint nee_plus_plus_debug_mode = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::NEE_PLUS_PLUS_DEBUG_MODE);\n\t\t\t\tconst char* items[] 
= { \"- No debug\", \"- Grid cells\" };\n\t\t\t\tif (ImGui::Combo(\"Debug mode\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::NEE_PLUS_PLUS_DEBUG_MODE), items, IM_ARRAYSIZE(items)))\n\t\t\t\t{\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tif (ImGui::Button(\"Clear visibility map\"))\n\t\t\t\t{\n\t\t\t\t\tm_renderer->get_NEE_plus_plus_render_pass()->reset(false);\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\t\tbool display_shadow_rays = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED);\n\t\t\t\tif (ImGui::Checkbox(\"Display shadow rays discarded\", &display_shadow_rays))\n\t\t\t\t{\n\t\t\t\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED, display_shadow_rays ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"With this debug view enabled, every black pixel is a pixel which discarded its \"\n\t\t\t\t\t\"shadow ray thanks to NEE++ russian roulette.\\n\"\n\t\t\t\t\t\"A colored pixel didn't discard its shadow ray.\");\n\t\t\t\tif (display_shadow_rays)\n\t\t\t\t{\n\t\t\t\t\tImGui::TreePush(\"Display shadow rays tree\");\n\n\t\t\t\t\tstatic int shadow_ray_bounce_to_display = DirectLightNEEPlusPlusDisplayShadowRaysDiscardedBounce;\n\t\t\t\t\tif (ImGui::SliderInt(\"Bounce to display\", &shadow_ray_bounce_to_display, 0, m_renderer->get_render_settings().nb_bounces))\n\t\t\t\t\t{\n\t\t\t\t\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_NEE_PLUS_PLUS_DISPLAY_SHADOW_RAYS_DISCARDED_BOUNCE, shadow_ray_bounce_to_display);\n\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t}\n\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n}\n\nbool ImGuiSettingsWindow::use_next_event_estimation_checkbox(const std::string& text)\n{\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tbool use_nee_plus_plus = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS);\n\tif (ImGui::Checkbox(text.c_str(), &use_nee_plus_plus))\n\t{\n\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_USE_NEE_PLUS_PLUS, use_nee_plus_plus ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\tm_renderer->recompile_kernels();\n\t\tm_render_window->set_render_dirty(true);\n\n\t\treturn true;\n\t}\n\n\treturn false;\n}\n\nvoid ImGuiSettingsWindow::draw_principled_bsdf_energy_conservation()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tImGui::SeparatorText(\"BSDF Energy Conservation Settings\");\n\n\tbool do_energy_conservation = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION);\n\tif (ImGui::Checkbox(\"Do energy conservation\", &do_energy_conservation))\n\t{\n\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_ENERGY_COMPENSATION, do_energy_conservation ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\tm_renderer->recompile_kernels();\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\tImGuiRenderer::show_help_marker(\"Global toggle to completely enable/disable any forms \"\n\t\t\"of energy compensation in all the materials using the Principled BSDF\");\n\n\tif (do_energy_conservation)\n\t{\n\t\tImGui::TreePush(\"Energy conservation options tree\");\n\n\t\t{\n\t\t\tbool do_glass_energy_compensation = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION);\n\t\t\tif (ImGui::Checkbox(\"Do glass lobe energy compensation\", &do_glass_energy_compensation))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_GLASS_ENERGY_COMPENSATION, do_glass_energy_compensation ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Global toggle on whether or not objects in the scene that use \"\n\t\t\t\t\"the Principled BSDF should do energy compensation for the glass layer.\"\n\t\t\t\t\"\"\n\t\t\t\t\"Implementation of [Practical multiple scattering compensation for microfacet models, Turquin, 2019].\");\n\t\t}\n\n\t\t{\n\t\t\tbool do_clearcoat_energy_compensation = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION);\n\t\t\tif (ImGui::Checkbox(\"Do clearcoat lobe energy compensation\", &do_clearcoat_energy_compensation))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_CLEARCOAT_ENERGY_COMPENSATION, do_clearcoat_energy_compensation ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Global toggle on whether or not objects in the scene that use \"\n\t\t\t\t\"the Principled BSDF should do energy compensation for the clearcoat layer.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Energy compensation on the clearcoat layer is an approximation but works very well in common cases.\");\n\t\t}\n\n\t\t{\n\t\t\tbool do_specular_energy_compensation = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION);\n\t\t\tif (ImGui::Checkbox(\"Do specular/diffuse lobe energy compensation\", &do_specular_energy_compensation))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_SPECULAR_ENERGY_COMPENSATION, do_specular_energy_compensation ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Global toggle on whether or not objects in the scene that use \"\n\t\t\t\t\"the Principled BSDF should do energy compensation for the glossy (specular/diffuse) layer.\"\n\t\t\t\t\"\"\n\t\t\t\t\"Implementation of [Practical multiple scattering compensation for microfacet models, Turquin, 2019].\");\n\t\t}\n\n\t\t{\n\t\t\tbool do_metallic_energy_compensation = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION);\n\t\t\tif (ImGui::Checkbox(\"Do metallic lobe energy compensation\", &do_metallic_energy_compensation))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_ENERGY_COMPENSATION, do_metallic_energy_compensation ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"Global toggle on whether or not objects in the scene that use \"\n\t\t\t\t\"the Principled BSDF should do energy compensation for the metallic layer.\"\n\t\t\t\t\"\"\n\t\t\t\t\"Implementation of [Practical multiple scattering compensation for microfacet models, Turquin, 2019].\");\n\n\t\t\tif (do_metallic_energy_compensation)\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"Fresnel multiscatter tree\");\n\n\t\t\t\tbool use_multiple_scattering_fresnel = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION);\n\t\t\t\tif (ImGui::Checkbox(\"Do GGX Multiple scattering fresnel\", &use_multiple_scattering_fresnel))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_METALLIC_FRESNEL_ENERGY_COMPENSATION, use_multiple_scattering_fresnel ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\tImGuiRenderer::show_help_marker(\"Implementation of [Practical multiple scattering compensation for microfacet models, Turquin, 2019]\"\n\t\t\t\t\t\" for GGX energy compensation. The multiple scattering fresnel term takes into account the Fresnel \"\n\t\t\t\t\t\"reflection/transmission effect when the rays bounce multiple times on the microsurface. This is responsible \"\n\t\t\t\t\t\"for the increase in saturation of the color of conductors due to multiple scattering in-between the microfacets.\");\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\t\t}\n\n\t\tbool setting_changed = false;\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::SeparatorText(\"Energy conservation max bounces\");\n\t\tImGui::Text(\"%s\", \"\");\n\t\tImGuiRenderer::show_help_marker(\"After what bounce to stop doing energy conservation (depending on the type of material)\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"0 means that energy conservation will only be done on the first hit (of camera rays) for example.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"For glass, a value of 4 is usually enough to avoid losing too much energy when looking straight at a rough glass object.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"For metals, 0 (only on the first hit) is also probably good enough except in some specific cases where rays get trapped \"\n\t\t\t\"(on a Mitsuba knob for example) where 4+ bounces may be required for decent results.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"For clearcoated and specular materials, 0 is enough for smooth clearcoat/specular layers. 
\"\n\t\t\t\"For high roughness clearcoats/specular layers, the situation is the same as for metals: \"\n\t\t\t\"0 should be good enough as long as there are not too many concentrated inter-reflections \"\n\t\t\t\"(in which case, a higher value, 4+, is going to be preferred).\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"-1 to disable and always do energy compensation.\");\n\t\tsetting_changed |= ImGui::SliderInt(\"Glass\", &render_data.bsdfs_data.glass_energy_compensation_max_bounce, -1, render_settings.nb_bounces);\n\t\tsetting_changed |= ImGui::SliderInt(\"Clearcoat\", &render_data.bsdfs_data.clearcoat_energy_compensation_max_bounce, -1, render_settings.nb_bounces);\n\t\tsetting_changed |= ImGui::SliderInt(\"Specular/diffuse\", &render_data.bsdfs_data.glossy_base_energy_compensation_max_bounce, -1, render_settings.nb_bounces);\n\t\tsetting_changed |= ImGui::SliderInt(\"Metallic\", &render_data.bsdfs_data.metal_energy_compensation_max_bounce, -1, render_settings.nb_bounces);\n\t\tif (setting_changed)\n\t\t\tm_render_window->set_render_dirty(true);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::Text(\"Energy compensation roughness threshold\");\n\t\tif (ImGui::SliderFloat(\"\", &render_data.bsdfs_data.energy_compensation_roughness_threshold, 0.0f, 1.0f))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGuiRenderer::show_help_marker(\"Below this roughness, energy compensation will not be applied.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"Generally speaking, the darkening of the material due to missing energy compensation is barely visible below 0.15f roughness.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"0.0f disables the threshold and energy compensation will always be applied.\");\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::Checkbox(\"Use hardware texture interpolation\", &render_data.bsdfs_data.use_hardware_tex_interpolation))\n\t\t{\n\t\t\tm_renderer->load_GGX_glass_energy_compensation_textures(render_data.bsdfs_data.use_hardware_tex_interpolation ? 
hipFilterModeLinear : hipFilterModePoint);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"Using the hardware for texture interpolation is faster but less precise than doing manual interpolation in the shader.\");\n\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiSettingsWindow::display_ReSTIR_DI_bias_status(std::shared_ptr<GPUKernelCompilerOptions> kernel_options)\n{\n\tImGui::Text(\"Status: \"); ImGui::SameLine();\n\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tstd::vector<std::string> bias_reasons;\n\tstd::vector<std::string> hover_explanations;\n\tif (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS) == RESTIR_DI_BIAS_CORRECTION_1_OVER_M)\n\t{\n\t\tbias_reasons.push_back(\"- 1/M biased weights\");\n\t\thover_explanations.push_back(\"1/M weights do not take the number of neighbors that \"\n\t\t\t\"could have produced the resampled sample into account. This leads to darkening \"\n\t\t\t\"bias because we're not weighting our picked sample as if it could have been \"\n\t\t\t\"produced by M neighbors whereas fewer neighbors than that could have actually produced it.\");\n\t}\n\n\tif (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE) == KERNEL_OPTION_TRUE\n\t\t&& kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY) == KERNEL_OPTION_FALSE)\n\t{\n\t\tbias_reasons.push_back(\"- Visibility reuse without visibility in bias correction\");\n\t\thover_explanations.push_back(\"When using the visibility reuse pass at the end of the \"\n\t\t\t\"initial candidates sampling pass, light samples that are occluded are discarded.\\n\"\n\t\t\t\"Temporal & spatial reuse pass will then only resample on unoccluded samples.\\n\"\n\t\t\t\"If not accounting for visibility when counting valid neighbors, we may determine \"\n\t\t\t\"that a neighbor could have produced the picked sample when 
actually, it couldn't \"\n\t\t\t\"because from the neighbor's point of view, the sample could have been occluded \"\n\t\t\t\"(visibility reuse pass).\\n\"\n\t\t\t\"This overestimates the number of valid neighbors and results in darkening.\\n\\n\");\n\t}\n\n\tif ((kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY) == KERNEL_OPTION_TRUE\n\t\t|| (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY) == KERNEL_OPTION_TRUE && render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass))\n\t\t&& kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY) == KERNEL_OPTION_FALSE)\n\t{\n\t\tstd::string prefix;\n\t\tif (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY) == KERNEL_OPTION_TRUE)\n\t\t\tprefix = \" - Initial \";\n\t\telse if (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_SPATIAL_TARGET_FUNCTION_VISIBILITY) == KERNEL_OPTION_TRUE && render_settings.restir_di_settings.common_spatial_pass.do_spatial_reuse_pass)\n\t\t\tprefix = \" - Spatial \";\n\n\t\tbias_reasons.push_back(prefix + \"target function visibility without\\n\"\n\t\t\t\"    visibility in bias correction\");\n\t\thover_explanations.push_back(\"When using the visibility term in the target function used to \"\n\t\t\t\"produce initial candidate samples (or temporally/spatially resample), all remaining samples are unoccluded.\\n\"\n\t\t\t\"Temporal & spatial reuse passes will then only resample on unoccluded samples.\\n\"\n\t\t\t\"If not accounting for visibility when counting valid neighbors (visibility in bias correction), we may determine \"\n\t\t\t\"that a neighbor could have produced the picked sample when actually, it couldn't \"\n\t\t\t\"because from the neighbor's point of view, the sample could have been occluded \"\n\t\t\t\"(visibility term in target 
function).\\n\"\n\t\t\t\"This overestimates the number of valid neighbors and results in darkening.\\n\\n\");\n\t}\n\n\tif (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_DO_VISIBILITY_REUSE) == KERNEL_OPTION_FALSE\n\t\t&& kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_INITIAL_TARGET_FUNCTION_VISIBILITY) == KERNEL_OPTION_FALSE\n\t\t&& kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_USE_VISIBILITY) == KERNEL_OPTION_TRUE\n\t\t&& (kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS) == RESTIR_DI_BIAS_CORRECTION_1_OVER_Z\n\t\t\t|| kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS) == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS\n\t\t\t|| kernel_options->get_macro_value(GPUKernelCompilerOptions::RESTIR_DI_BIAS_CORRECTION_WEIGHTS) == RESTIR_DI_BIAS_CORRECTION_PAIRWISE_MIS_DEFENSIVE))\n\t{\n\t\tbias_reasons.push_back(\"- Visibility in bias correction without\\n\"\n\t\t\t\"visibility reuse (or initial candidates visibility)\");\n\t\thover_explanations.push_back(\"When taking visibility into account in the counting of \"\n\t\t\t\"valid neighbors (visibility in bias correction), we're going to assume that if the picked sample (from resampling \"\n\t\t\t\"the neighbors) is occluded from the neighbor's point of view, then that neighbor \"\n\t\t\t\"couldn't have produced that sample.\\n\\n\"\n\t\t\t\"However, that's incorrect.\\n\\n\"\n\t\t\t\"The initial candidate sampling pass doesn't take visibility into account and can \"\n\t\t\t\"thus produce occluded samples. 
Without the visibility reuse pass (or visibility used \"\n\t\t\t\"directly in the target function), this statement stays true.\\n\"\n\t\t\t\"This means that \\\"a sample that is occluded from the neighbor's point of view\\\" could actually \"\n\t\t\t\"have been produced.\\n\"\n\t\t\t\"We are then underestimating the number of valid neighbors that could have produced \"\n\t\t\t\"our sample and we end up with brightening bias.\\n\"\n\t\t\t\"This is an issue with 1/Z weights (and pairwise-MIS) because MIS-like and proper MIS \"\n\t\t\t\"(generalized balance heuristic/GBH) weights do not blindly overweight a sample as \"\n\t\t\t\"1/Z does (and then hopes that we divide by Z accordingly).\");\n\t}\n\n\tif (render_settings.enable_adaptive_sampling\n\t\t&& render_settings.restir_di_settings.common_spatial_pass.allow_converged_neighbors_reuse\n\t\t&& render_settings.restir_di_settings.common_spatial_pass.converged_neighbor_reuse_probability > 0.0f)\n\t{\n\t\tbias_reasons.push_back(\"- Adaptive Sampling + \\\"Allow Reuse of Converged Neighbors\\\"\");\n\t\thover_explanations.push_back(\"Adaptive sampling disables the sampling of some pixels. The \"\n\t\t\t\"spatial reuse pass then reuses from neighbors that do not evolve anymore (if they've \"\n\t\t\t\"been disabled by adaptive sampling) and that causes some slight convergence issues, \"\n\t\t\t\"especially on parts of the image where adaptive sampling does the more work. 
This \"\n\t\t\t\"manifest as bias on the hardest-to-render parts of the scene.\");\n\t}\n\n\tif (!render_settings.restir_di_settings.do_final_shading_visibility)\n\t{\n\t\tbias_reasons.push_back(\"- Not using final shading visibility\");\n\t\thover_explanations.push_back(\"Not using visibility during the final shading of samples \"\n\t\t\t\"produced by ReSTIR leads to \\\"missing\\\" shadows and an overall brightening of the \"\n\t\t\t\"scene because light samples are assumed unoccluded when they actually aren't.\");\n\t}\n\n\tif (!bias_reasons.empty())\n\t{\n\t\tImGui::TextColored(ImVec4(1.0f, 0.0f, 0.0f, 1.0f), \"Biased\");\n\t\tImGui::TreePush(\"Bias reasons\");\n\n\t\tfor (int i = 0; i < bias_reasons.size(); i++)\n\t\t{\n\t\t\tImGui::Text(\"%s\", bias_reasons[i].c_str());\n\t\t\tImGuiRenderer::add_tooltip(hover_explanations[i].c_str());\n\t\t\tImGuiRenderer::show_help_marker(hover_explanations[i].c_str());\n\n\t\t}\n\t\tImGui::TreePop();\n\n\t}\n\telse\n\t\tImGui::TextColored(ImVec4(0.0f, 1.0f, 0.0f, 1.0f), \"Unbiased\");\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n}\n\nvoid ImGuiSettingsWindow::draw_denoiser_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Denoiser\"))\n\t\treturn;\n\n\tImGui::TreePush(\"Denoiser tree\");\n\n\tif (ImGui::Checkbox(\"Enable denoiser\", &m_application_settings->enable_denoising))\n\t\tm_render_window->get_display_view_system()->queue_display_view_change(m_application_settings->enable_denoising ? 
DisplayViewType::DENOISED_BLEND : DisplayViewType::DEFAULT);\n\tif (ImGui::Checkbox(\"Use OpenGL Interop AOV Buffers\", &m_application_settings->denoiser_use_interop_buffers))\n\t{\n\t\tm_renderer->set_use_denoiser_AOVs_interop_buffers(m_application_settings->denoiser_use_interop_buffers);\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\tImGuiRenderer::show_help_marker(\"If checked, a little bit of path tracing performance will be gained (on AMD GPUs at least) at the expense of \"\n\t\t\"a good bit of performance if displaying \\\"- Denoiser - Normals\\\" or \\\" - Denoiser - Albedo\\\" in the viewport.\\n\\n\"\n\t\t\"\"\n\t\t\"You want this option checked only if you're visualizing the denoiser normals or denoiser albedo basically.\");\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tImGui::BeginDisabled(!m_application_settings->enable_denoising);\n\tif (ImGui::CollapsingHeader(\"AOVs\"))\n\t{\n\t\tImGui::TreePush(\"Denoiser AOVs Tree\");\n\t\tif (ImGui::Checkbox(\"Use albedo AOV\", &m_application_settings->denoiser_use_albedo))\n\t\t{\n\t\t\tm_application_settings->denoiser_settings_changed = true;\n\n\t\t\tm_render_window_denoiser->set_use_albedo(m_application_settings->denoiser_use_albedo);\n\t\t\tif (!m_application_settings->denoiser_use_albedo)\n\t\t\t{\n\t\t\t\t// We're forcing the use of normals AOV off here because it seems like OIDN doesn't support normal\n\t\t\t\t// AOV without also using albedo AOV (at least I got some oidn::Exception when I tried\n\t\t\t\t// using the normals without the albedo).\n\t\t\t\t// TODO this may have to do with wrong HIP buffers being used. 
Try this out again after we're using proper HIP buffers\n\t\t\t\tm_application_settings->denoiser_use_normals = false;\n\t\t\t\tm_render_window_denoiser->set_use_normals(false);\n\t\t\t}\n\n\t\t\tm_render_window_denoiser->finalize();\n\t\t}\n\t\tImGui::SameLine();\n\t\tif (ImGui::Checkbox(\"Denoise albedo\", &m_application_settings->denoiser_denoise_albedo))\n\t\t{\n\t\t\tm_application_settings->denoiser_settings_changed = true;\n\n\t\t\tm_render_window_denoiser->set_denoise_albedo(m_application_settings->denoiser_denoise_albedo);\n\t\t\tm_render_window_denoiser->finalize();\n\t\t}\n\t\tImGui::BeginDisabled(!m_application_settings->denoiser_use_albedo);\n\t\tif (ImGui::Checkbox(\"Use normals AOV\", &m_application_settings->denoiser_use_normals))\n\t\t{\n\t\t\tm_application_settings->denoiser_settings_changed = true;\n\n\t\t\tm_render_window_denoiser->set_use_normals(m_application_settings->denoiser_use_normals);\n\t\t\tm_render_window_denoiser->finalize();\n\t\t}\n\t\tImGui::SameLine();\n\t\tif (ImGui::Checkbox(\"Denoise normals\", &m_application_settings->denoiser_denoise_normals))\n\t\t{\n\t\t\tm_application_settings->denoiser_settings_changed = true;\n\n\t\t\tm_render_window_denoiser->set_denoise_normals(m_application_settings->denoiser_denoise_normals);\n\t\t\tm_render_window_denoiser->finalize();\n\t\t}\n\t\tImGui::EndDisabled();\n\t\tImGui::TreePop();\n\t}\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tDisplaySettings& display_settings = m_render_window->get_display_view_system()->get_display_settings();\n\tImGui::Checkbox(\"Only denoise when rendering is done\", &m_application_settings->denoise_when_rendering_done);\n\tImGui::SliderInt(\"Denoiser sample skip\", &m_application_settings->denoiser_sample_skip, 1, 128);\n\tif (ImGui::SliderFloat(\"Denoiser blend\", &display_settings.denoiser_blend, 0.0f, 1.0f))\n\t\tm_render_window->set_force_viewport_refresh(true);\n\tImGui::EndDisabled();\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::Text(\"Denoising time: 
%.3fms\", m_application_settings->last_denoised_duration / 1000.0f);\n\n\tImGui::TreePop();\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n}\n\nvoid ImGuiSettingsWindow::draw_post_process_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Post-processing\"))\n\t\treturn;\n\tImGui::TreePush(\"Post-processing tree\");\n\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\n\tif (ImGui::CollapsingHeader(\"Tone-mapping\"))\n\t{\n\t\tImGui::TreePush(\"Tonemapping post processing tree\");\n\n\t\tDisplaySettings& display_settings = m_render_window->get_display_view_system()->get_display_settings();\n\n\t\tbool changed = false;\n\t\tchanged |= ImGui::Checkbox(\"Do tonemapping\", &display_settings.do_tonemapping);\n\t\tchanged |= ImGui::SliderFloat(\"Gamma\", &display_settings.tone_mapping_gamma, 1.0f, 2.4f);\n\t\tchanged |= ImGui::SliderFloat(\"Exposure\", &display_settings.tone_mapping_exposure, 0.0f, 3.0f);\n\t\tif (changed)\n\t\t\tm_render_window->set_force_viewport_refresh(true);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\tstd::shared_ptr<GMoNRenderPass> gmon_render_pass = m_renderer->get_gmon_render_pass();\n\tGMoNGPUData& gmon_data = gmon_render_pass->get_gmon_data();\n\n\tif (!render_data.render_settings.accumulate)\n\t{\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tImGuiRenderer::add_warning(\"GMoN cannot be used without enabling accumulation.\");\n\t}\n\tImGui::BeginDisabled(!render_data.render_settings.accumulate);\n\tif (ImGui::CollapsingHeader(\"GMoN\"))\n\t{\n\t\tImGui::TreePush(\"GMoN tree post processing\");\n\n\t\tif (ImGui::Checkbox(\"Use GMoN\", &gmon_data.using_gmon))\n\t\t\ttoggle_gmon();\n\n\t\tif (HIPRTRenderSettings::DEBUG_DEV_GMON_BLEND_WEIGHTS)\n\t\t{\n\t\t\tif (ImGui::Checkbox(\"Auto blending weight\", 
&render_data.render_settings.DEBUG_gmon_auto_blending_weights))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tif (ImGui::SliderFloat(\"GMoN Divider\", &render_data.render_settings.DEBUG_GMON_DIVIDER, 1.0f, 10.0f))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tif (ImGui::SliderInt(\"GMoN Window size\", &render_data.render_settings.DEBUG_GMON_WINDOW_SIZE, 3, 21))\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::Text(\"GMoN Darkening: %f\", m_renderer->get_gmon_render_pass()->get_gmon_darkening());\n\t\t\tImGui::Text(\"Variance 1: %f\", m_renderer->get_gmon_render_pass()->m_DEBUG_LUMINANCE_VARIANCE1);\n\t\t\tImGui::Text(\"Variance 2: %f\", m_renderer->get_gmon_render_pass()->m_DEBUG_LUMINANCE_VARIANCE2);\n\t\t}\n\n\t\tImGuiRenderer::show_help_marker(\"Use GMoN for fireflies elimination.\\n\"\n\t\t\t\"The algorithm computes the median of means of the pixels as an estimator \"\n\t\t\t\"that is more robust than the simple mean usually used to average samples.\\n\"\n\t\t\t\"The algorithm is unbiased as long as enough samples are accumulated. 
If not \"\n\t\t\t\"enough samples are accumulated, the firefly elimination tends to be a bit too \"\n\t\t\t\"strong and the image will probably end up darker than expected, especially on high-variance scenes.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"Implementation following [Firefly removal in Monte Carlo rendering with adaptive Median of meaNs, Buisine et al., 2021]\");\n\n\t\tif (gmon_data.using_gmon)\n\t\t{\n\t\t\tImGui::Text(\"VRAM Usage: %.3fMB\", gmon_render_pass->get_VRAM_usage_bytes() / 1000000.0f);\n\n\t\t\tbool gmon_mode_changed = false;\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tImGui::Text(\"GMoN Mode\");\n\t\t\tgmon_mode_changed |= ImGui::RadioButton(\"Median of Means\", ((int*)&render_data.buffers.gmon_estimator.gmon_mode), 0); ImGui::SameLine();\n\t\t\tgmon_mode_changed |= ImGui::RadioButton(\"Binary G-MoN\", ((int*)&render_data.buffers.gmon_estimator.gmon_mode), 1); ImGui::SameLine();\n\t\t\tgmon_mode_changed |= ImGui::RadioButton(\"Adaptive G-MoN\", ((int*)&render_data.buffers.gmon_estimator.gmon_mode), 2);\n\t\t\tif (gmon_mode_changed)\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tstatic int number_of_sets = GMoNMSetsCount;\n\t\t\tif (ImGui::SliderInt(\"Number of sets (M)\", &number_of_sets, 3, 31))\n\t\t\t{\n\t\t\t\tnumber_of_sets = hippt::clamp(3, 31, number_of_sets);\n\n\t\t\t\tif (!(number_of_sets & 1))\n\t\t\t\t\t// number_of_sets is even but we only want odd\n\t\t\t\t\tnumber_of_sets--;\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"How many sets (M variable in the GMoN paper, [Buisine et al., 2021]) to use.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"A simple way to choose that number is: keep that number as low as possible as long as it removes the fireflies.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"As a general rule: more sets eliminate fireflies the best but more sets require more samples per \"\n\t\t\t\t\"pixel to avoid too much darkening, especially on high-variance scene. 
If your scene is very \"\n\t\t\t\t\"easy to render, you probably don't need many sets (less than 15, maybe even less than 11). If your scene has high \"\n\t\t\t\t\"variance caustics, you're probably going to need a lot of samples per pixel and so a large \"\n\t\t\t\t\"number of sets will be fine anyways.\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Said otherwise: if you're noticing too much darkening, try reducing the number of sets or \"\n\t\t\t\t\"try accumulating more samples per pixel.\\n\\n\");\n\t\t\t// If the user modified the number of sets, displaying an \"Apply\" button\n\t\t\tif (number_of_sets != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT))\n\t\t\t{\n\t\t\t\tImGui::TreePush(\"GMoN Apply number of sets tree\");\n\n\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t{\n\t\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::GMON_M_SETS_COUNT, number_of_sets);\n\n\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t\tImGui::TreePop();\n\t\t\t}\n\n\t\t\tif (ImGui::SliderFloat(\"GMoN blend factor\", &gmon_data.gmon_blend_factor, 0.0f, 1.0f))\n\t\t\t{\n\t\t\t\tgmon_data.gmon_auto_blend_factor = false;\n\t\t\t\tm_render_window->set_force_viewport_refresh(true);\n\t\t\t}\n\t\t\tImGui::SameLine();\n\t\t\tif (ImGui::Checkbox(\"Auto\", &gmon_data.gmon_auto_blend_factor))\n\t\t\t\tm_render_window->set_force_viewport_refresh(true);\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t\tif (m_render_window->get_display_view_system()->get_current_display_view_type() != DisplayViewType::GMON_BLEND)\n\t\t\t\tImGuiRenderer::add_warning(\"The display view currently in used isn't \\\"GMoN blend\\\" so the output of GMoN cannot be visualized.\");\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n\tImGui::EndDisabled();\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::TreePop();\n}\n\nvoid ImGuiSettingsWindow::toggle_gmon()\n{\n\tstd::shared_ptr<GMoNRenderPass> gmon_render_pass = 
m_renderer->get_gmon_render_pass();\n\tbool gmon_now_enabled = gmon_render_pass->get_gmon_data().using_gmon;\n\tif (m_render_window->get_display_view_system()->get_current_display_view_type() == DisplayViewType::DEFAULT && gmon_now_enabled)\n\t\t// We just enabled GMoN, automatically switching to the GMoN view for convenience\n\t\tm_render_window->get_display_view_system()->queue_display_view_change(DisplayViewType::GMON_BLEND);\n\n\tif (gmon_now_enabled && !gmon_render_pass->get_all_kernels()[GMoNRenderPass::COMPUTE_GMON_KERNEL]->has_been_compiled())\n\t\t// The GMoN kernel hasn't been compiled yet, compiling it\n\t\tm_renderer->recompile_kernels();\n\n\tm_render_window->set_render_dirty(true);\n}\n\nvoid ImGuiSettingsWindow::draw_quality_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Quality settings\"))\n\t\treturn;\n\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tImGui::TreePush(\"Quality settings tree\");\n\n\tif (ImGui::CollapsingHeader(\"Nested dielectrics\"))\n\t{\n\t\tImGui::TreePush(\"Nested dielectrics tree\");\n\n\t\tstatic int nested_dielectrics_stack_size = NestedDielectricsStackSize;\n\t\tif (ImGui::SliderInt(\"Stack Size\", &nested_dielectrics_stack_size, 3, 8))\n\t\t\tnested_dielectrics_stack_size = std::max(1, nested_dielectrics_stack_size);\n\t\tImGui::Text(\"Max nested dielectrics: %d\", nested_dielectrics_stack_size - 3);\n\t\tImGuiRenderer::show_help_marker(\"How many nested dielectrics objects can be present in the scene with the \"\n\t\t\t\"current nested dielectrics stack size\");\n\n\t\tif (nested_dielectrics_stack_size != global_kernel_options->get_macro_value(GPUKernelCompilerOptions::NESTED_DIELETRCICS_STACK_SIZE_OPTION))\n\t\t{\n\t\t\tImGui::TreePush(\"Apply button nested dielectric stack size\");\n\t\t\tif 
(ImGui::Button(\"Apply\"))\n\t\t\t{\n\t\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::NESTED_DIELETRCICS_STACK_SIZE_OPTION, nested_dielectrics_stack_size);\n\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_renderer->resize_g_buffer_ray_volume_states();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Alpha testing\"))\n\t{\n\t\tImGui::TreePush(\"Alpha testing tree\");\n\n\t\tif (ImGui::Checkbox(\"Do alpha testing\", &render_settings.do_alpha_testing))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\tif (ImGui::SliderInt(\"Max bounce\", &render_settings.alpha_testing_indirect_bounce, 0, render_settings.nb_bounces + 1, \"%d\"))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGuiRenderer::show_help_marker(\"At what bounce to stop doing alpha testing.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"A value of 0 means that alpha testing isn't done at bounce 0 which means that even camera \"\n\t\t\t\"rays do not do alpha testing --> alpha testing is disabled.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"A value of 1 means that camera rays do alpha testing but the next bounce rays do not do alpha \"\n\t\t\t\"testing.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"Shadow rays for NEE are also affected by this setting.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"This feature helps with performance on scenes with medium/a lot of alpha tested geometry.\");\n\t\tif (render_settings.alpha_testing_indirect_bounce > render_settings.nb_bounces)\n\t\t\tImGui::Text(\"Alpha tests always enabled.\");\n\t\tif (render_settings.alpha_testing_indirect_bounce == 0)\n\t\t\tImGui::Text(\"Alpha tests always disabled.\");\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f)); \n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Normal mapping\"))\n\t{\n\t\tImGui::TreePush(\"Normal mapping tree\");\n\n\t\tif (ImGui::Checkbox(\"Do normal 
mapping\", &render_settings.do_normal_mapping))\n\t\t\tm_render_window->set_render_dirty(true);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Triangle sampling\"))\n\t{\n\t\tImGui::TreePush(\"Triangle sampling tree\");\n\n\t\tconst char* items_triangle_sampling[] = { \"- Turk 1990\", \"- Heitz 2019\" };\n\t\tconst char* tooltips_triangle_sampling[] = {\n\t\t\t\"Common way of warping from a square to a triangle using square roots:\\n\"\n\t\t\t\"V = (1.0f - sqrt(u1))* V1 + sqrt(u1)* (s2* V2 + (1.0f - s2) * V3)\",\n\n\t\t\t\"Implementation of[A Low - Distortion Map Between Triangle and Square, Heitz, 2019]\\n\"\n\t\t\t\"It is faster than Turk method's and better perserves the stratification of the random \"\n\t\t\t\"number samplers\"\n\t\t};\n\t\tif (ImGuiRenderer::ComboWithTooltips(\"Triangle point sampling strategy\", global_kernel_options->get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::TRIANGLE_POINT_SAMPLING_STRATEGY), items_triangle_sampling, IM_ARRAYSIZE(items_triangle_sampling), tooltips_triangle_sampling))\n\t\t{\n\t\t\tm_renderer->recompile_kernels();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Light clamping\"))\n\t{\n\t\tImGui::TreePush(\"Lighting Settings Performance Tree\");\n\n\t\tImGui::SeparatorText(\"Clamping\");\n\t\tif (ImGui::SliderFloat(\"Direct lighting\", &render_settings.direct_contribution_clamp, 0.0f, 10.0f))\n\t\t{\n\t\t\trender_settings.direct_contribution_clamp = std::max(0.0f, render_settings.direct_contribution_clamp);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tif (ImGui::SliderFloat(\"Envmap ligthing\", &render_settings.envmap_contribution_clamp, 0.0f, 10.0f))\n\t\t{\n\t\t\trender_settings.envmap_contribution_clamp = std::max(0.0f, render_settings.envmap_contribution_clamp);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tif 
(ImGui::SliderFloat(\"Indirect ligthing\", &render_settings.indirect_contribution_clamp, 0.0f, 10.0f))\n\t\t{\n\t\t\trender_settings.indirect_contribution_clamp = std::max(0.0f, render_settings.indirect_contribution_clamp);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::SliderFloat(\"Minimum light contribution\", &render_settings.minimum_light_contribution, 0.0f, 10.0f))\n\t\t{\n\t\t\trender_settings.minimum_light_contribution = std::max(0.0f, render_settings.minimum_light_contribution);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"If a selected light (for direct lighting estimation) contributes at a given \"\n\t\t\t\" point less than this 'minimum_light_contribution' value then the light sample is discarded. \"\n\t\t\t\"This can improve performance at the cost of some bias depending on the scene.\\n\"\n\t\t\t\"0.0f to disable\");\n\n\t\tbool allow_backfacing_lihts = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS);\n\t\tif (ImGui::Checkbox(\"Allow backfacing lights\", &allow_backfacing_lihts))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_ALLOW_BACKFACING_LIGHTS, allow_backfacing_lihts ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\tm_renderer->recompile_kernels();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Microfacet regularization\"))\n\t\tdraw_microfacet_model_regularization_tree();\n\n\tImGui::TreePop();\n}\n\nvoid ImGuiSettingsWindow::draw_microfacet_model_regularization_tree()\n{\n\tImGui::TreePush(\"Microfacet regularization\");\n\n\tHIPRTRenderData& render_data = m_renderer->get_render_data();\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\n\tbool regularize_bsdf = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION);\n\tif (ImGui::Checkbox(\"Do microfacet model regularization\", &regularize_bsdf))\n\t{\n\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION, regularize_bsdf ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\tm_renderer->recompile_kernels();\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\tif (regularize_bsdf && render_data.bsdfs_data.GGX_masking_shadowing == GGXMaskingShadowingFlavor::HeightCorrelated)\n\t{\n\t\tImGuiRenderer::add_warning(\"Microfacet model regularization cannot be used with height-correlated masking shadowing\");\n\n\t\tImGui::TreePush(\"Use height uncorrelated button tree\");\n\t\tif (ImGui::Button(\"Switch to height-uncorrelated masking shadowing\"))\n\t\t\trender_data.bsdfs_data.GGX_masking_shadowing = GGXMaskingShadowingFlavor::HeightUncorrelated;\n\t\tImGui::TreePop();\n\t}\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tif (regularize_bsdf)\n\t{\n\t\tbool do_consistent_tau = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION);\n\t\tif (ImGui::Checkbox(\"Consistent parameterization\", &do_consistent_tau))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DO_MICROFACET_REGULARIZATION_CONSISTENT_PARAMETERIZATION, do_consistent_tau ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\tm_renderer->recompile_kernels();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"With this feature enabled, tau will refine over time to help sharpen caustics a bit\"\n\t\t\t\" while keeping variance in check.\");\n\t\tif (do_consistent_tau)\n\t\t{\n\t\t\tImGui::TreePush(\"Consistent tau tree\");\n\t\t\tImGui::Text(\"Current tau: %f\", MicrofacetRegularization::consistent_tau(render_data.bsdfs_data.microfacet_regularization.tau_0, render_data.render_settings.sample_number));\n\t\t\tImGui::TreePop();\n\t\t}\n\t\tbool do_diffusion_heuristic = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC);\n\t\tif (ImGui::Checkbox(\"Use diffusion heuristic\", &do_diffusion_heuristic))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_MICROFACET_REGULARIZATION_DIFFUSION_HEURISTIC, do_diffusion_heuristic ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\t\tm_renderer->recompile_kernels();\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"Whether or not to take the path's roughness into account when regularizing the BSDFs.\\n\"\n\t\t\t\"This feature is essential to keep highlights sharp on directly visible surfaces.\");\n\n\t\tstd::string tau_text = std::string(\"Tau\") + (do_consistent_tau ? \"_0\" : \"\");\n\t\tif (ImGui::SliderFloat(tau_text.c_str(), &render_data.bsdfs_data.microfacet_regularization.tau_0, 10.0f, 1000.0f))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGuiRenderer::show_help_marker(\"Main parameter to control the regularization. 
The lower this parameter, the stronger the regularization.\\n\\n\"\n\t\t\t\"Note that if \\\"Consistent parameterization\\\" is enabled, this parameter will be adjusted dynamically (starting from the given value) based on the number \"\n\t\t\t\"of samples rendered so far.\");\n\t\tif (ImGui::SliderFloat(\"Minimum roughness\", &render_data.bsdfs_data.microfacet_regularization.min_roughness, 0.0f, 1.0f))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGuiRenderer::show_help_marker(\"All materials in the scene will at least have that much roughness.\\n\\n\"\n\t\t\t\"Useful when lights are so small that even camera ray jittering causes variance and so roughening the surface helps \"\n\t\t\t\"BSDF rays hit the light source more often (and light samples too). The main purpose is to help with very sharp glossy highlights. \"\n\t\t\t\"Regularization is only applied during NEE so the direct apperance of smooth objects isn't affected.\");\n\t}\n\n\tImGui::TreePop();\n}\n\nvoid ImGuiSettingsWindow::draw_performance_settings_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tif (!ImGui::CollapsingHeader(\"Performance Settings\"))\n\t\treturn;\n\n\tImGui::TreePush(\"Performance settings tree\");\n\n\tImGui::Text(\"Device: %s\", m_renderer->get_device_properties().name);\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tstd::shared_ptr<GPUKernelCompilerOptions> global_kernel_options = m_renderer->get_global_compiler_options();\n\tHardwareAccelerationSupport hwi_supported = m_renderer->device_supports_hardware_acceleration();\n\n\tif (ImGui::CollapsingHeader(\"General Settings\"))\n\t{\n\t\tImGui::TreePush(\"Perf settings general settings tree\");\n\n\t\tif (ImGui::InputFloat(\"GPU Stall Percentage\", &m_application_settings->GPU_stall_percentage))\n\t\t\tm_application_settings->GPU_stall_percentage = std::max(0.0f, std::min(m_application_settings->GPU_stall_percentage, 99.9f));\n\t\tImGuiRenderer::show_help_marker(\"How much percent of 
the time the GPU will be forced to be idle (not rendering anything).\"\n\t\t\t\t\t\t\t\t\t\t\" This feature is basically only meant for GPUs that get too hot to avoid burning your GPUs during long renders if you have\"\n\t\t\t\t\t\t\t\t\t\t\" time to spare.\");\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tdraw_russian_roulette_options();\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tbool do_direction_reuse = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE);\n\t\tif (ImGui::Checkbox(\"Warp BSDF sampled directions reuse\", &do_direction_reuse))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DO_FIRST_BOUNCE_WARP_DIRECTION_REUSE, do_direction_reuse ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\tm_renderer->recompile_kernels();\n\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"Partial and experimental implementation of[Generate Coherent Rays Directly, Liu et al., 2024] \"\n\t\t\t\"for reuse sampled directions on the first hit accross the threads of warps\");\n\n\t\tbool delta_distrib_opti = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION);\n\t\tif (ImGui::Checkbox(\"BSDF delta distribution optimization\", &delta_distrib_opti))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::PRINCIPLED_BSDF_DELTA_DISTRIBUTION_EVALUATION_OPTIMIZATION, delta_distrib_opti ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\tm_renderer->recompile_kernels();\n\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"If this is true, then delta distribution lobes of the principled BSDF will not be evaluated \"\n\t\t\t\"if the incident light direction used for the evaluation doesn't come from sampling the \"\n\t\t\t\" delta distribution lobe itself.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"For example, consider a clearcoat diffuse lobe. If bsdf_eval() is called with an \"\n\t\t\t\"incident light direction that was sampled from the diffuse lobe, the perfectly smooth clearcoat lobe \"\n\t\t\t\"is going to have its contribution evaluate to 0 because there is no chance that the sampled \"\n\t\t\t\"diffuse direction perfectly aligns with the delta of the smooth clearcoat lobe.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"Same with all the other lobes that can be delta distributions.\\n\\n\"\n\t\t\t\"\"\n\t\t\t\"There is basically no point in disabling that, this is just for performance comparisons.\");\n\t\tif (!delta_distrib_opti && global_kernel_options->get_macro_value(GPUKernelCompilerOptions::PATH_SAMPLING_STRATEGY) == PSS_RESTIR_GI)\n\t\t{\n\t\t\tImGuiRenderer::add_warning(\"Due to numerical float imprecisions, errors on specular surfaces (especially glass) \"\n\t\t\t\t\"are expected with ReSTIR GI if not using \\\"BSDF delta distribution optimization\\\".\"\n\t\t\t\t\"\\nThis will manifest as darkening on perfectly specular surfaces (delta distributions).\\n\\n\"\n\t\t\t\t\"\"\n\t\t\t\t\"Enable \\\"BSDF delta distribution optimization\\\" to get rid of this issue.\");\n\t\t}\n\n\t\tbool direct_light_delta_distrib_opti = global_kernel_options->get_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION);\n\t\tif (ImGui::Checkbox(\"NEE delta distribution optimization\", 
&direct_light_delta_distrib_opti))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(GPUKernelCompilerOptions::DIRECT_LIGHT_SAMPLING_BSDF_DELTA_DISTRIBUTION_OPTIMIZATION, direct_light_delta_distrib_opti ? KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\tm_renderer->recompile_kernels();\n\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"If this is true, then NEE light samples will not even be attempted of delta distribution materials.\"\n\t\t\t\"This is because for delta distribution materials, an arbitrary incident light direction will always produce a 0-contribution outgoing radiance \"\n\t\t\t\"so doing NEE with light samples on these materials is useless and we can save some computations here.\");\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Ray-tracing settings\"))\n\t{\n\t\tImGui::TreePush(\"Ray-tracing settings tree\");\n\n\t\tstatic bool use_hardware_acceleration = global_kernel_options->has_macro(\"__USE_HWI__\");\n\t\tImGui::BeginDisabled(hwi_supported != HardwareAccelerationSupport::SUPPORTED);\n\t\tif (ImGui::Checkbox(\"Use ray tracing hardware acceleration\", &use_hardware_acceleration))\n\t\t{\n\t\t\tglobal_kernel_options->set_macro_value(\"__USE_HWI__\", use_hardware_acceleration);\n\n\t\t\tm_renderer->recompile_kernels();\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\t// Printing a custom tooltip depending on whether or not we support hardware acceleration\n\t\t// and, if not supported, why we don't support it \n\t\tswitch (hwi_supported)\n\t\t{\n\t\tcase SUPPORTED:\n\t\t\tImGuiRenderer::show_help_marker(\"Whether or not to enable hardware accelerated ray tracing (bbox & triangle intersections)\");\n\t\t\tbreak;\n\n\t\tcase AMD_UNSUPPORTED:\n\t\t\tImGuiRenderer::show_help_marker(\"Hardware accelerated ray tracing is only supported on RDNA2+ AMD GPUs.\");\n\t\t\tbreak;\n\n\t\tcase NVIDIA_UNSUPPORTED:\n\t\t\tImGuiRenderer::show_help_marker(\"HIPRT 
cannot access NVIDIA's proprietary hardware accelerated ray-tracing. Hardware ray-tracing unavailable.\");\n\t\t\tbreak;\n\t\t}\n\n\t\tbool bvh_needs_rebuild = false;\n\t\tstatic int build_type_chosen = 0;\n\t\tstd::vector<const char*> bvh_items = { \"- SBVH\", \"- HPLOC\", \"- LBVH\"};\n\t\tbvh_needs_rebuild |= ImGui::Combo(\"BVH Build\", &build_type_chosen, bvh_items.data(), bvh_items.size());\n\n\t\tstatic bool do_triangle_splits = true;\n\t\tbvh_needs_rebuild |= ImGui::Checkbox(\"Do triangle splits\", &do_triangle_splits);\n\n\t\t/*static bool do_triangle_pairing = true;\n\t\tbvh_needs_rebuild |= ImGui::Checkbox(\"Do triangle pairing\", &do_triangle_pairing);*/\n\n\t\tstatic bool do_bvh_compaction = true;\n\t\tbvh_needs_rebuild |= ImGui::Checkbox(\"Do BVH compaction\", &do_bvh_compaction);\n\n\t\tif (bvh_needs_rebuild)\n\t\t{\n\t\t\thiprtBuildFlags build_flags = 0;\n\t\t\tswitch (build_type_chosen)\n\t\t\t{\n\t\t\tcase 0:\n\t\t\t\t// SBVH\n\t\t\t\tbuild_flags |= hiprtBuildFlagBitPreferHighQualityBuild;\n\t\t\t\tbreak;\n\n\t\t\tcase 1:\n\t\t\t\t// HPLOC\n\t\t\t\tbuild_flags |= hiprtBuildFlagBitPreferBalancedBuild;\n\t\t\t\tbreak;\n\n\t\t\tcase 2:\n\t\t\t\t// LBVH\n\t\t\t\tbuild_flags |= hiprtBuildFlagBitPreferFastBuild;\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\t/*if (!do_triangle_pairing)\n\t\t\t\tbuild_flags |= hiprtBuildFlagBitDisableTrianglePairing;*/\n\n\t\t\tif (!do_triangle_splits)\n\t\t\t\tbuild_flags |= hiprtBuildFlagBitDisableSpatialSplits;\n\n\t\t\tm_renderer->rebuild_whole_scene_bvh(build_flags, do_bvh_compaction);\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Kernel settings\"))\n\t{\n\t\tImGui::TreePush(\"Shared/global stack Traversal Options Tree\");\n\n\t\t// List of exceptions because these kernels do not trace any rays\n\t\tstatic std::vector<std::string> kernel_names;\n\t\tstatic std::map<std::string, std::shared_ptr<GPUKernel>> kernels = m_renderer->get_tracing_kernels();\n\t\tif 
(kernel_names.empty())\n\t\t\t// Filling the kernel names if not already done\n\t\t\tfor (const auto& name_to_kernel : kernels)\n\t\t\t\tkernel_names.push_back(name_to_kernel.first);\n\n\t\tstatic std::string selected_kernel_name = FillGBufferRenderPass::FILL_GBUFFER_KERNEL;\n\t\tstatic std::shared_ptr<GPUKernel> selected_kernel = kernels[selected_kernel_name];\n\t\tstatic GPUKernelCompilerOptions* selected_kernel_options = &selected_kernel->get_kernel_options();\n\n\t\tif (ImGui::BeginCombo(\"Kernel\", selected_kernel_name.c_str()))\n\t\t{\n\t\t\tfor (const std::string& kernel_name : kernel_names)\n\t\t\t{\n\t\t\t\tconst bool is_selected = (selected_kernel_name == kernel_name);\n\t\t\t\tif (ImGui::Selectable(kernel_name.c_str(), is_selected))\n\t\t\t\t{\n\t\t\t\t\tselected_kernel_name = kernel_name;\n\t\t\t\t\tselected_kernel = kernels[selected_kernel_name];\n\t\t\t\t\tselected_kernel_options = &selected_kernel->get_kernel_options();\n\t\t\t\t}\n\n\t\t\t\tif (is_selected)\n\t\t\t\t\tImGui::SetItemDefaultFocus();\n\t\t\t}\n\t\t\tImGui::EndCombo();\n\t\t}\n\n\n\n\n\t\tImGui::TreePush(\"Kernel selection for stack size\");\n\n\t\t{\n\t\t\tstatic std::unordered_map<std::string, bool> use_shared_stack_traversal;\n\t\t\tif (use_shared_stack_traversal.find(selected_kernel_name) == use_shared_stack_traversal.end())\n\t\t\t\tuse_shared_stack_traversal[selected_kernel_name] = selected_kernel_options->get_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL);\n\t\t\tbool& use_shared_stack_traversal_bool = use_shared_stack_traversal[selected_kernel_name];\n\n\t\t\tif (ImGui::Checkbox(\"Use shared/global stack BVH traversal\", &use_shared_stack_traversal_bool))\n\t\t\t{\n\t\t\t\tselected_kernel_options->set_macro_value(GPUKernelCompilerOptions::USE_SHARED_STACK_BVH_TRAVERSAL, use_shared_stack_traversal_bool ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t}\n\t\t\tImGuiRenderer::show_help_marker(\"If checked, shared memory + a globally allocated buffer will be used for the BVH \"\n\t\t\t\t\t\t\t\t\t\t\t\"traversal of rays.\\n\"\n\t\t\t\t\t\t\t\t\t\t\t\"This incurs an additional cost in VRAM but improves traversal performance.\");\n\n\n\n\n\n\t\t\tif (use_shared_stack_traversal_bool)\n\t\t\t{\n\t\t\t\tstatic std::unordered_map<std::string, int> pending_stack_size_changes;\n\t\t\t\tif (pending_stack_size_changes.find(selected_kernel_name) == pending_stack_size_changes.end())\n\t\t\t\t\tpending_stack_size_changes[selected_kernel_name] = selected_kernel_options->get_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE);\n\t\t\t\tint& pending_stack_size = pending_stack_size_changes[selected_kernel_name];\n\n\t\t\t\tImGui::PushItemWidth(8 * ImGui::GetFontSize());\n\t\t\t\tif (ImGui::InputInt(\"Shared stack size\", &pending_stack_size))\n\t\t\t\t\tpending_stack_size = std::max(0, pending_stack_size);\n\t\t\t\tImGui::PopItemWidth();\n\n\t\t\t\tImGuiRenderer::show_help_marker(\"Fast shared memory stack used for the BVH traversal of \\\"global\\\" rays (rays that search for a closest hit with no maximum distance)\\n\\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\"Allocating more of this speeds up the BVH traversal but reduces the amount of L1 cache available to \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"the rest of the shader which thus reduces its performance. A tradeoff must be made.\\n\\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\"If this shared memory stack isn't large enough for traversing the BVH, then \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"it is complemented by using the global stack buffer. 
If both combined aren't enough \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"for the traversal, then artifacts start showing up in renders.\\n\\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\"Note that setting this value to 0 disables the shared stack usage but still uses the global buffer \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"for traversal. This approach is still better that not using any of these two memories at all (this \"\n\t\t\t\t\t\t\t\t\t\t\t\t\"becomes the case when the checkboxes above are not checked.)\");\n\n\t\t\t\tif (pending_stack_size != selected_kernel_options->get_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE))\n\t\t\t\t{\n\t\t\t\t\t// If the user has modified the size of the shared stack, showing a button to apply the changes \n\t\t\t\t\t// (not applying the changes everytime because this requires a recompilation of basically all shaders and that's heavy)\n\n\t\t\t\t\tImGui::TreePush(\"Apply button shared stack size\");\n\t\t\t\t\tif (ImGui::Button(\"Apply\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tselected_kernel_options->set_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE, pending_stack_size);\n\t\t\t\t\t\tm_renderer->recompile_kernels();\n\t\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t\t}\n\t\t\t\t\tImGui::TreePop();\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tImGui::TreePop();\n\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::PushItemWidth(8 * ImGui::GetFontSize());\n\t\tif (ImGui::InputInt(\"Global stack per-thread size\", &m_renderer->get_render_data().global_traversal_stack_buffer_size))\n\t\t{\n\t\t\tm_renderer->get_render_data().global_traversal_stack_buffer_size = hippt::clamp(0, 128, m_renderer->get_render_data().global_traversal_stack_buffer_size);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGui::PopItemWidth();\n\n\t\tImGuiRenderer::show_help_marker(\"Size of the global stack buffer for each thread. 
Used for complementing the shared memory stack allocated in the kernels.\"\n\t\t\t\t\t\t\t\t\t\t\"A good value for this parameter is scene-complexity dependent.\\n\\n\"\n\t\t\t\t\t\t\t\t\t\t\"A lower value will use less VRAM but will start introducing artifacts if the value is too low due \"\n\t\t\t\t\t\t\t\t\t\t\"to insufficient stack size for the BVH traversal.\\n\\n\"\n\t\t\t\t\t\t\t\t\t\t\"16 seems to be a good value to start with. If lowering this value improves performance, then that \"\n\t\t\t\t\t\t\t\t\t\t\"means that the BVH traversal is starting to suffer (the traversal is incomplete --> improved performance) \"\n\t\t\t\t\t\t\t\t\t\t\"and rendering artifacts will start to show up.\");\n\n\t\tstd::string size_string = \"Global Stack Buffer VRAM Usage: \";\n\t\tsize_string += std::to_string(m_renderer->get_render_data().global_traversal_stack_buffer_size * std::ceil(m_renderer->m_render_resolution.x / 8.0f) * 8.0f * std::ceil(m_renderer->m_render_resolution.y / 8.0f) * 8.0f * sizeof(int) / 1000000.0f);\n\t\tsize_string += \" MB\";\n\t\tImGui::Text(\"%s\", size_string.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n\n\tif (ImGui::CollapsingHeader(\"Lighting settings\"))\n\t{\n\t\tImGui::TreePush(\"Lighting settings tree\");\n\n\t\tdraw_next_event_estimation_plus_plus_panel();\n\n\t\tImGui::TreePop();\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::TreePop();\n}\n\nvoid ImGuiSettingsWindow::draw_performance_metrics_panel()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tif (!ImGui::CollapsingHeader(\"Performance Metrics\"))\n\t\treturn;\n\n\tImGui::TreePush(\"Performance metrics tree\");\n\n\tImGui::Text(\"Device: %s\", m_renderer->get_device_properties().name);\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tif (ImGui::Button(\"Apply benchmark settings\"))\n\t{\n\t\trender_settings.freeze_random = true;\n\t\trender_settings.enable_adaptive_sampling = 
false;\n\n\t\tm_render_window->set_render_dirty(true);\n\t}\n\tif (ImGui::Checkbox(\"Freeze random\", (bool*)&render_settings.freeze_random))\n\t\tm_render_window->set_render_dirty(true);\n\tif (ImGui::InputInt(\"Samples per frame\", &render_settings.samples_per_frame))\n\t{\n\t\t// Clamping to 1\n\t\trender_settings.samples_per_frame = std::max(1, render_settings.samples_per_frame);\n\n\t\t// If the user manually changed to number of samples per frame, let's disable auto sample per frame\n\t\t// because the user probably doesn't want it\n\t\tm_application_settings->auto_sample_per_frame = false;\n\t}\n\tImGui::SameLine();\n\tImGui::Checkbox(\"Auto\", &m_application_settings->auto_sample_per_frame);\n\n\tbool rolling_window_size_changed = false;\n\tint rolling_window_size = m_render_window_perf_metrics->get_window_size();\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tImGui::Text(\"Measures Window Size\"); ImGui::SameLine();\n\trolling_window_size_changed |= ImGui::RadioButton(\"25\", &rolling_window_size, 25); ImGui::SameLine();\n\trolling_window_size_changed |= ImGui::RadioButton(\"100\", &rolling_window_size, 100); ImGui::SameLine();\n\trolling_window_size_changed |= ImGui::RadioButton(\"250\", &rolling_window_size, 250); ImGui::SameLine();\n\trolling_window_size_changed |= ImGui::RadioButton(\"1000\", &rolling_window_size, 1000);\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tif (rolling_window_size_changed)\n\t\tm_render_window_perf_metrics->resize_window(rolling_window_size);\n\n\tRenderGraph& render_graph = m_renderer->get_render_graph();\n\tfor (auto& name_to_render_pass : render_graph.get_render_passes())\n\t{\n\t\tconst std::map<std::string, std::shared_ptr<GPUKernel>>& render_pass_kernels = name_to_render_pass.second->get_all_kernels();\n\t\tif (!render_pass_kernels.empty())\n\t\t{\n\t\t\tImGui::SeparatorText(name_to_render_pass.first.c_str());\n\n\t\t\tImGui::TreePush(name_to_render_pass.first.c_str());\n\t\t\tfor (auto& name_to_kernel : 
render_pass_kernels)\n\t\t\t\tdraw_perf_metric_specific_panel(m_render_window_perf_metrics, name_to_kernel.first, name_to_kernel.first);\n\t\t\tImGui::TreePop();\n\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\t}\n\n\t}\n\n\tdraw_perf_metric_specific_panel(m_render_window_perf_metrics, RenderWindow::PERF_METRICS_CPU_OVERHEAD_TIME_KEY, \"CPU Overhead\");\n\tImGui::Separator();\n\tdraw_perf_metric_specific_panel(m_render_window_perf_metrics, GPURenderer::ALL_RENDER_PASSES_TIME_KEY, \"Total sample time (GPU)\");\n\tdraw_perf_metric_specific_panel(m_render_window_perf_metrics, GPURenderer::FULL_FRAME_TIME_WITH_CPU_KEY, \"Total sample time (+CPU)\");\n\tif (m_debug_trace_kernel_selected != 0)\n\t{\n\t\tImGui::Separator();\n\t\tdraw_perf_metric_specific_panel(m_render_window_perf_metrics, GPURenderer::DEBUG_KERNEL_TIME_KEY, \"Debug trace kernel\");\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\tImGui::TreePop();\n}\n\nvoid ImGuiSettingsWindow::draw_perf_metric_specific_panel(std::shared_ptr<PerformanceMetricsComputer> perf_metrics, const std::string& perf_metrics_key, const std::string& label)\n{\n\tfloat variance, min, max;\n\tvariance = perf_metrics->get_variance(perf_metrics_key);\n\tmin = perf_metrics->get_min(perf_metrics_key);\n\tmax = perf_metrics->get_max(perf_metrics_key);\n\n\tstatic std::unordered_map<std::string, bool> key_to_display_graph;\n\tif (key_to_display_graph.find(perf_metrics_key) == key_to_display_graph.end())\n\t\tkey_to_display_graph[perf_metrics_key] = false;\n\n\t// Pusing the ID for that perf key metrics so that no ImGui widgets collide\n\tImGui::PushID(perf_metrics_key.c_str());\n\n\tImGui::Text(\"%s: %.3fms (%.1f FPS)\", label.c_str(), perf_metrics->get_current_value(perf_metrics_key), 1000.0f / perf_metrics->get_average(perf_metrics_key));\n\tif (ImGui::IsItemHovered(ImGuiHoveredFlags_AllowWhenDisabled))\n\t{\n\t\tstd::string line_1 = format_perf_metrics_tooltip_line(label, \" (avg):\", \" (min / max):\", \" %.3fms (%.1f FPS)\", 
perf_metrics->get_average(perf_metrics_key), 1000.0f / perf_metrics->get_average(perf_metrics_key));\n\t\tstd::string line_2 = format_perf_metrics_tooltip_line(label, \" (var):\", \" (min / max):\", \" %.3fms\", variance);\n\t\tstd::string line_3 = format_perf_metrics_tooltip_line(label, \" (std dev):\", \" (min / max):\", \" %.3fms\", std::sqrt(variance));\n\t\tstd::string line_4 = format_perf_metrics_tooltip_line(label, \" (min / max):\", \" (min / max):\", \" %.3fms / %.3fms\", min, max);\n\n\t\tstd::string tooltip = line_1 + \"\\n\" + line_2 + \"\\n\" + line_3 + \"\\n\" + line_4;\n\t\tImGuiRenderer::add_tooltip(tooltip);\n\t}\n\n\tImGui::SameLine();\n\tImGui::Checkbox(\"Show graph\", &key_to_display_graph[perf_metrics_key]);\n\tif (key_to_display_graph[perf_metrics_key])\n\t{\n\t\tstatic std::unordered_map<std::string, std::pair<float, float>> key_to_min_max;\n\t\tif (key_to_min_max.find(perf_metrics_key) == key_to_min_max.end())\n\t\t\tkey_to_min_max[perf_metrics_key] = std::make_pair(min, max);\n\n\t\tfloat& scale_min = key_to_min_max[perf_metrics_key].first;\n\t\tfloat& scale_max = key_to_min_max[perf_metrics_key].second;\n\t\tscale_min = perf_metrics->get_data_index(perf_metrics_key) == 0 ? min : scale_min;\n\t\tscale_max = perf_metrics->get_data_index(perf_metrics_key) == 0 ? 
max : scale_max;\n\n\t\tImGui::PlotHistogram(\"\",\n\t\t\tPerformanceMetricsComputer::data_getter,\n\t\t\tperf_metrics->get_data(perf_metrics_key).data(),\n\t\t\tperf_metrics->get_value_count(perf_metrics_key),\n\t\t\t/* value offset */0,\n\t\t\tlabel.c_str(),\n\t\t\tscale_min, scale_max,\n\t\t\t/* size */ ImVec2(0, 80));\n\n\t\tstatic std::unordered_map<std::string, bool> key_to_auto_rescale;\n\t\tif (key_to_auto_rescale.find(perf_metrics_key) == key_to_auto_rescale.end())\n\t\t\tkey_to_auto_rescale[perf_metrics_key] = true;\n\n\t\tbool& auto_rescale = key_to_auto_rescale[perf_metrics_key];\n\t\tImGui::SameLine();\n\t\tif (ImGui::Button(\"Rescale\") || auto_rescale)\n\t\t{\n\t\t\tscale_min = min;\n\t\t\tscale_max = max;\n\t\t}\n\t\tImGui::SameLine();\n\t\tImGui::Checkbox(\"Auto-rescale\", &auto_rescale);\n\t}\n\n\t// Popping the ID for that perf key metrics\n\tImGui::PopID();\n}\n\ntemplate <class... Args>\nstd::string ImGuiSettingsWindow::format_perf_metrics_tooltip_line(const std::string& label, const std::string& suffix, const std::string& longest_header_for_padding, const std::string& formatter_after_header, const Args& ...args)\n{\n\t// Creating the formatter for automatically left-padding the header of the lines to the longer line (which is \"(min / max)\")\n\tstd::string header_padding_formatter = \"%-\" + std::to_string(label.length() + longest_header_for_padding.length()) + \"s\";\n\tstd::string line_formatter = header_padding_formatter + formatter_after_header;\n\tstd::string header = label + suffix;\n\n\tchar line_char[512];\n\tstd::string test = \"%s\";\n\tsnprintf(line_char, 512, line_formatter.c_str(), header.c_str(), args...);\n\n\treturn std::string(line_char);\n}\n\nextern bool g_background_shader_compilation_enabled;\nvoid ImGuiSettingsWindow::draw_shader_kernels_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Shaders/Kernels\"))\n\t{\n\t\tImGui::TreePush(\"Shaders kernels tree\");\n\n\t\tif (ImGui::Button(\"Hard shaders 
reload\"))\n\t\t{\n\t\t\tm_renderer->recompile_kernels(false);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"Forces the recompilation of the shaders without using the shader cache.\");\n\t\tif (ImGui::Button(\"Soft shaders reload\"))\n\t\t{\n\t\t\tm_renderer->recompile_kernels(true);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\tImGuiRenderer::show_help_marker(\"Recompiles the shaders using the shader cache.\");\n\n\t\tif (ImGui::Button(\"Clear shader cache\"))\n\t\t\tstd::filesystem::remove_all(\"shader_cache\");\n\t\tImGuiRenderer::show_help_marker(\"Completely clears the shader cache on the disk.\");\n\n\t\tstatic GPUKernelCompiler::ShaderCacheUsageOverride shader_cache_use_override = g_gpu_kernel_compiler.get_shader_cache_usage_override();\n\t\tstd::vector<const char*> shader_cache_override_values = { \"No override\", \"Do not use shader cache\", \"Always use shader cache\" };\n\t\tif (ImGui::Combo(\"Shader cache use override\", (int*)&shader_cache_use_override, shader_cache_override_values.data(), shader_cache_override_values.size()))\n\t\t\tg_gpu_kernel_compiler.set_shader_cache_usage_override(shader_cache_use_override);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::CollapsingHeader(\"Kernels compilation statistics\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Kernel compilation statistics tree\");\n\n\t\t\tImGui::Text(\"Kernel [Registers, Shared Memory, Local Memory]\");\n\t\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\n\t\t\t// Computing the longest kernel name for aligning everything\n\t\t\tsize_t longest_kernel_name = 0;\n\t\t\tfor (auto kernel_name_to_kernel : m_renderer->get_all_kernels())\n\t\t\t\tlongest_kernel_name = hippt::max(longest_kernel_name, kernel_name_to_kernel.first.length());\n\t\t\tstd::string padding_formatter = \"%-\" + std::to_string(longest_kernel_name) + \"s\";\n\n\t\t\tfor (auto kernel_name_to_kernel : m_renderer->get_all_kernels())\n\t\t\t{\n\t\t\t\tconst std::string& 
kernel_name = kernel_name_to_kernel.first;\n\t\t\t\tconst std::shared_ptr<GPUKernel> kernel = kernel_name_to_kernel.second;\n\n\t\t\t\tif (kernel->has_been_compiled())\n\t\t\t\t{\n\t\t\t\t\tint nb_reg = kernel->get_kernel_attribute(ORO_FUNC_ATTRIBUTE_NUM_REGS);\n\t\t\t\t\tint nb_shared = kernel->get_kernel_attribute(ORO_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES);\n\t\t\t\t\tint nb_local = kernel->get_kernel_attribute(ORO_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES);\n\n\t\t\t\t\tImGui::PushID(kernel_name.c_str());\n\t\t\t\t\tif (ImGui::Button(\"C\"))\n\t\t\t\t\t{\n\t\t\t\t\t\tstd::string commandline_string;\n\t\t\t\t\t\t\n\t\t\t\t\t\tcommandline_string = \"hipcc -x hip \";\n\n\t\t\t\t\t\tstd::vector<std::string> options = kernel->get_kernel_options().get_all_macros_as_std_vector_string();\n\t\t\t\t\t\tfor (std::string& option : options)\n\t\t\t\t\t\t\tcommandline_string += option + \" \";\n\n\t\t\t\t\t\tstd::vector<std::string> include_directories = GPUKernel::COMMON_ADDITIONAL_KERNEL_INCLUDE_DIRS;\n\t\t\t\t\t\tfor (std::string& include_dir : include_directories)\n\t\t\t\t\t\t\tcommandline_string += \"-I\" + include_dir + \" \";\n\n\t\t\t\t\t\t// For debugging info and assembly-source code line correspondences\n\t\t\t\t\t\tcommandline_string += \"-std=c++17 -gline-tables-only --save-temps \";\n\t\t\t\t\t\t// For hardware ray tracing instructions\n\t\t\t\t\t\tcommandline_string += \"--offload-arch=gfx1100\";\n\t\t\t\t\t\t// Source file that hipcc compiles\n\t\t\t\t\t\tcommandline_string += \" ../src/llvm-compile-kernel.h\";\n\n\t\t\t\t\t\t// For outputting the disassembly + source line correspondances to a .txt and opening it with notepad++\n\t\t\t\t\t\tcommandline_string += \" && llvm-objdump --no-show-raw-insn -S llvm-compile-kernel-hip-amdgcn-amd-amdhsa-gfx1100.out > assembly.txt && notepad++.exe assembly.txt &\";\n\n\t\t\t\t\t\tImGui::SetClipboardText(commandline_string.c_str());\n\t\t\t\t\t}\n\t\t\t\t\tImGuiRenderer::add_tooltip(\"Copies the hipcc compilation command to the 
clipboard.\");\n\t\t\t\t\tImGui::PopID();\n\n\t\t\t\t\tImGui::SameLine();\n\t\t\t\t\tstd::string text = padding_formatter + \" [%d, %d, %d]\";\n\t\t\t\t\tImGui::Text(text.c_str(), kernel_name.c_str(), nb_reg, nb_shared, nb_local);\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tstd::string text = padding_formatter + \" [Not compiled]\";\n\t\t\t\t\tImGui::Text(text.c_str(), kernel_name.c_str());\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiSettingsWindow::draw_debug_panel()\n{\n\tif (!ImGui::CollapsingHeader(\"Debug\"))\n\t\treturn;\n\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tImGui::TreePush(\"Debug tree\");\n\n\tif (ImGui::CollapsingHeader(\"Debug/WIP options\"))\n\t{\n\t\tImGui::TreePush(\"Debug options tree\");\n\n\t\tif (ImGui::Checkbox(\"Enable direct\", &render_settings.enable_direct))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGui::PushItemWidth(24 * ImGui::GetFontSize());\n\n\t\tif (ImGui::Checkbox(\"Correlate ReGIR\", &render_settings.regir_settings.DEBUG_CORRELATE_rEGIR))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tif (ImGui::SliderInt(\"ReGIR Pre integration iterations\", &render_settings.DEBUG_REGIR_PRE_INTEGRATION_ITERATIONS, 1, 64))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tif (ImGui::SliderInt(\"ReGIR Pre integration sample per res\", &render_settings.DEBUG_REGIR_PRE_INTEGRATION_SAMPLE_COUNT_PER_RESERVOIR, 1, 64\t))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tif (ImGui::Checkbox(\"Normalize RIS integral\", &render_settings.regir_settings.DEBUG_DO_RIS_INTEGRAL_NORMALIZATION))\n\t\t\tm_render_window->set_render_dirty(true);\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tstatic int debug_index = 0;\n\t\tif (ImGui::InputInt(\"Debug index\", &debug_index))\n\t\t\tdebug_index = hippt::clamp(0, 1023, debug_index);\n\t\tunsigned long long int sum_count = OrochiBuffer<unsigned long long 
int>::download_data(reinterpret_cast<unsigned long long int*>(render_settings.DEBUG_SUM_COUNT) + debug_index, 1)[0];\n\t\tunsigned long long int sums = OrochiBuffer<unsigned long long int>::download_data(reinterpret_cast<unsigned long long int*>(render_settings.DEBUG_SUM_TOTAL) + debug_index, 1)[0];\n\t\tImGui::Text(\"Debug sum count / sums / ratio:\"\n\t\t\t\"\\n\\t%llu\"\n\t\t\t\"\\n\\t%llu\"\n\t\t\t\"\\n\\t%f\", sum_count, sums, sum_count / (double)sums);\n\n\t\tImGui::TreePop();\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t}\n\n\tif (ImGui::Checkbox(\"Show NaNs\", &m_renderer->get_render_settings().display_NaNs))\n\t\tm_render_window->set_render_dirty(true);\n\tImGuiRenderer::show_help_marker(\"If true, NaNs that occur during the rendering will show up as pink pixels.\");\n\n\tif (ImGui::Checkbox(\"White furnace mode\", &m_renderer->get_render_data().bsdfs_data.white_furnace_mode))\n\t\tm_render_window->set_render_dirty(true);\n\tif (m_renderer->get_render_data().bsdfs_data.white_furnace_mode)\n\t{\n\t\tImGui::TreePush(\"White furnace tree\");\n\t\tif (ImGui::Checkbox(\"Turn off emissives\", &m_renderer->get_render_data().bsdfs_data.white_furnace_mode_turn_off_emissives))\n\t\t\tm_render_window->set_render_dirty(true);\n\t\tImGui::TreePop();\n\t}\n\n\tstatic bool display_only_sample = DisplayOnlySampleN;\n\tif (ImGui::Checkbox(\"Display only sample N\", &display_only_sample))\n\t{\n\t\tm_renderer->get_global_compiler_options()->set_macro_value(GPUKernelCompilerOptions::DISPLAY_ONLY_SAMPLE_N, display_only_sample ? 
KERNEL_OPTION_TRUE : KERNEL_OPTION_FALSE);\n\n\t\tm_render_window->set_render_dirty(true);\n\t\tm_renderer->recompile_kernels();\n\t}\n\tif (display_only_sample)\n\t{\n\t\tImGui::SameLine();\n\t\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n\t\tif (ImGui::InputInt(\"\", &m_renderer->get_render_data().render_settings.output_debug_sample_N))\n\t\t\tm_render_window->set_render_dirty(true);\n\n\t\tstatic bool auto_sample = true;\n\t\tImGui::SameLine();\n\t\tImGui::Checkbox(\"Auto\", &auto_sample);\n\t\tif (auto_sample)\n\t\t{\n\t\t\tint new_sample_count = m_render_window->get_application_settings()->max_sample_count - 1;\n\n\t\t\tif (m_renderer->get_render_data().render_settings.output_debug_sample_N != new_sample_count)\n\t\t\t\tm_render_window->set_render_dirty(true);\n\n\t\t\tm_renderer->get_render_data().render_settings.output_debug_sample_N = m_render_window->get_application_settings()->max_sample_count - 1;\n\t\t}\n\t}\n\n\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\tstd::vector<const char*> trace_kernel_items = { \"None\", \"TraceTest\" };\n\tif (ImGui::Combo(\"Override trace kernel\", &m_debug_trace_kernel_selected, trace_kernel_items.data(), trace_kernel_items.size()))\n\t{\n\t\tif (m_debug_trace_kernel_selected != 0)\n\t\t{\n\t\t\tm_debug_trace_kernel_options = *m_renderer->get_global_compiler_options().get();\n\t\t\tm_debug_trace_kernel_options.set_macro_value(\"__USE_HWI__\", 1);\n\n\t\t\tm_renderer->set_debug_trace_kernel(trace_kernel_items[m_debug_trace_kernel_selected], m_debug_trace_kernel_options);\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t\telse\n\t\t{\n\t\t\t// Disabling the debug trace kernel\n\t\t\tm_renderer->set_debug_trace_kernel(\"\");\n\t\t\tm_render_window->set_render_dirty(true);\n\t\t}\n\t}\n\n\tImGui::TreePush(\"DebugTraceKernelOptions\");\n\tswitch (m_debug_trace_kernel_selected)\n\t{\n\tcase 1:\n\t\tImGui::InputInt(\"BVH Traversal Shared Mem\", 
m_debug_trace_kernel_options.get_raw_pointer_to_macro_value(GPUKernelCompilerOptions::SHARED_STACK_BVH_TRAVERSAL_SIZE));\n\t\tif (ImGui::Button(\"Apply\"))\n\t\t\tm_renderer->set_debug_trace_kernel(trace_kernel_items[m_debug_trace_kernel_selected], m_debug_trace_kernel_options);\n\n\t\tbreak;\n\n\tdefault:\n\t\tbreak;\n\t}\n\tImGui::TreePop();\n\n\tImGui::TreePop();\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiSettingsWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_SETTINGS_WINDOW_H\n#define IMGUI_SETTINGS_WINDOW_H\n\n#include \"Compiler/GPUKernelCompilerOptions.h\"\n#include \"UI/ImGui/ImGuiRendererPerformancePreset.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n\n#include \"imgui.h\"\n\nclass RenderWindow;\nclass GPURenderer;\n\nclass ImGuiSettingsWindow\n{\npublic:\n\tstatic const char* TITLE;\n\tstatic const float BASE_SIZE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\tvoid set_status_text(const std::string& new_status_text);\n\tstd::string get_status_text() const;\n\n\tvoid draw();\n\tstatic void draw_camera_panel_static(const std::string& panel_title, RenderWindow* render_window, std::shared_ptr<GPURenderer> renderer);\n\nprivate:\n\tvoid draw_header();\n\tvoid draw_render_settings_panel();\n\tvoid draw_render_stopping_conditions_panel();\n\tvoid draw_russian_roulette_options();\n\tvoid display_view_selector();\n\tbool display_view_disabled(DisplayViewType display_view_type);\n\tvoid display_view_tooltip(DisplayViewType display_view_type);\n\tvoid display_view_disabled_action(DisplayViewType display_view_type);\n\tvoid apply_performance_preset(ImGuiRendererSettingsPreset performance_preset);\n\tvoid draw_camera_panel();\n\t// Static because we call this method from other ImGui classes to be able\n\t// to render the same panel\n\tvoid draw_environment_panel();\n\n\tvoid draw_sampling_panel();\n\tvoid draw_ReGIR_settings_panel();\n\ttemplate <bool IsReSTIRGI>\n\tvoid draw_ReSTIR_neighbor_heuristics_panel();\n\ttemplate<bool IsReSTIRGI>\n\tvoid draw_ReSTIR_temporal_reuse_panel(std::function<void(void)> draw_before_panel = {});\n\ttemplate<bool IsReSTIRGI>\n\tvoid draw_ReSTIR_spatial_reuse_panel(std::function<void(void)> draw_before_panel = {});\n\ttemplate <bool IsReSTIRGI>\n\tvoid draw_ReSTIR_bias_correction_panel();\n\tvoid 
draw_next_event_estimation_plus_plus_panel();\n\tbool use_next_event_estimation_checkbox(const std::string& text = \"Use NEE++\");\n\tvoid draw_principled_bsdf_energy_conservation();\n\tvoid display_ReSTIR_DI_bias_status(std::shared_ptr<GPUKernelCompilerOptions> kernel_options);\n\n\tvoid draw_denoiser_panel();\n\tvoid draw_post_process_panel();\n\tvoid draw_quality_panel();\n\tvoid draw_microfacet_model_regularization_tree();\n\n\tvoid toggle_gmon();\n\n\tvoid draw_performance_settings_panel();\n\tvoid draw_perf_metric_specific_panel(std::shared_ptr<PerformanceMetricsComputer> perf_metrics, const std::string& perf_metrics_key, const std::string& label);\n\ttemplate <class... Args>\n\tstd::string format_perf_metrics_tooltip_line(const std::string& label, const std::string& suffix, const std::string& longest_header_for_padding, const std::string& formatter_after_header, const Args& ...args);\n\n\tvoid draw_performance_metrics_panel();\n\tvoid draw_shader_kernels_panel();\n\tvoid draw_debug_panel();\n\n\t// What debug trace kernel is selected in the \"Debug\" panel\n\tint m_debug_trace_kernel_selected = 0;\n\tGPUKernelCompilerOptions m_debug_trace_kernel_options;\n\n\tRenderWindow* m_render_window = nullptr;\n\n\tstd::shared_ptr<ApplicationSettings> m_application_settings = nullptr;\n\tstd::shared_ptr<GPURenderer> m_renderer = nullptr;\n\tstd::shared_ptr<OpenImageDenoiser> m_render_window_denoiser = nullptr;\n\tstd::shared_ptr<PerformanceMetricsComputer> m_render_window_perf_metrics = nullptr;\n\n\tImVec2 m_current_size;\n\n\t// This is a status text that anyone can be modify (asynchronously = race conditions)\n\t// to display a message in the settings window.\n\t//\n\t// This is mostly used by RenderPasses to indicate if some kind of preprocessing of the scene\n\t// is currently running for example\n\tstd::string m_status_text = \"\";\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiToolsWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Renderer/Baker/GGXConductorDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXGlassDirectionalAlbedoSettings.h\"\n#include \"Renderer/Baker/GGXThinGlassDirectionalAlbedoSettings.h\"\n#include \"UI/ImGui/ImGuiToolsWindow.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"imgui.h\"\n#include \"misc/cpp/imgui_stdlib.h\"\n\nconst char* ImGuiToolsWindow::TITLE = \"Tools\";\n\nvoid ImGuiToolsWindow::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n\n\tm_renderer = m_render_window->get_renderer();\n}\n\nvoid ImGuiToolsWindow::draw()\n{\n\tImGui::Begin(ImGuiToolsWindow::TITLE);\n\n\tImGui::PushItemWidth(16 * ImGui::GetFontSize());\n\n\tdraw_ggx_energy_compensation_panel();\n\tdraw_image_difference_panel();\n\n\tImGui::PopItemWidth();\n\n\tImGui::End();\n}\n\nvoid ImGuiToolsWindow::draw_ggx_energy_compensation_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Baking\"))\n\t{\n\t\tImGui::TreePush(\"Baking tree\");\n\n\t\tif (ImGui::CollapsingHeader(\"GGX Energy compensation\"))\n\t\t{\n\t\t\tImGui::TreePush(\"Baking GGX Energy compensation tree\");\n\n\t\t\tdraw_GGX_conductors();\n\t\t\tdraw_GGX_fresnel();\n\t\t\tdraw_GGX_glass();\n\t\t\tdraw_GGX_thin_glass();\n\t\t\tdraw_glossy_dielectric();\n\n\t\t\tstatic std::vector<float> roughnesses = { 0.0f, 0.25f, 0.5f, 1.0f };\n\t\t\tstatic std::vector<float> iors = { 1.0f, 1.1f, 1.3f, 1.5f, 2.0f };\n\t\t\tstatic bool cooking = false;\n\t\t\tstatic bool next_step_ready = true;\n\t\t\tstatic int step = -1;\n\t\t\tint nb_steps = roughnesses.size() * iors.size();\n\n\t\t\tif (ImGui::Button(\"Start screenshotting\"))\n\t\t\t{\n\t\t\t\tstep = -1;\n\t\t\t\tcooking = true;\n\t\t\t}\n\n\t\t\tif (cooking)\n\t\t\t{\n\t\t\t\tif (next_step_ready && step < nb_steps - 1)\n\t\t\t\t{\n\t\t\t\t\tnext_step_ready = 
false;\n\t\t\t\t\tstep++;\n\n\t\t\t\t\tstd::vector<CPUMaterial> materials = m_renderer->get_current_materials();\n\t\t\t\t\tmaterials[0].ior = iors[step % iors.size()];\n\t\t\t\t\tmaterials[0].roughness = roughnesses[step / iors.size()];\n\t\t\t\t\tmaterials[0].make_safe();\n\n\t\t\t\t\tm_renderer->update_all_materials(materials);\n\t\t\t\t\tm_render_window->set_render_dirty(true);\n\t\t\t\t}\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\tif (m_render_window->is_rendering_done() && m_renderer->get_render_settings().sample_number > m_renderer->get_render_settings().adaptive_sampling_min_samples)\n\t\t\t\t\t{\n\t\t\t\t\t\tstd::string filename = \"Screenshot\" + std::to_string(roughnesses[step / iors.size()]) + \"x\" + std::to_string(iors[step % iors.size()]) + \" - \" + std::to_string(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_COS_THETA_O) + \"x\" + std::to_string(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_ROUGHNESS) + \"x\" + std::to_string(GPUBakerConstants::GGX_THIN_GLASS_DIRECTIONAL_ALBEDO_TEXTURE_SIZE_IOR) + \"x\" + \".png\";\n\t\t\t\t\t\tm_render_window->get_screenshoter()->write_to_png(filename);\n\n\t\t\t\t\t\tnext_step_ready = true;\n\t\t\t\t\t\tif (step == nb_steps - 1)\n\t\t\t\t\t\t\tcooking = false;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::TreePop();\n\t}\n}\n\n/**\n * Panel for the GGX conductors directional albedo\n */\nvoid ImGuiToolsWindow::draw_GGX_conductors()\n{\n\tif (ImGui::CollapsingHeader(\"GGX Conductors Directional Albedo\"))\n\t{\n\t\tImGui::TreePush(\"GGX_E tree\");\n\n\t\tstatic GGXConductorDirectionalAlbedoSettings ggx_dir_albedo_settings;\n\n\t\tstatic bool filename_modified = false;\n\t\tstatic std::string output_filename;\n\n\t\tImGui::InputInt(\"Texture Size - Cos Theta\", &ggx_dir_albedo_settings.texture_size_cos_theta);\n\t\tImGui::InputInt(\"Texture Size - Roughness\", &ggx_dir_albedo_settings.texture_size_roughness);\n\t\tImGui::InputInt(\"Integration 
Sample Count\", &ggx_dir_albedo_settings.integration_sample_count);\n\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\tImGui::Combo(\"GGX Masking-Shadowing\", (int*)&ggx_dir_albedo_settings.masking_shadowing_term, masking_shadowing_items.data(), masking_shadowing_items.size());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::InputText(\"Output Texture Filename\", &output_filename))\n\t\t\tfilename_modified = true;\n\n\t\tif (!filename_modified)\n\t\t\t// As long as the user hasn't touched the output filename,\n\t\t\t// we modify it automatically so that's its more convenient\n\t\t\toutput_filename = GPUBakerConstants::get_GGX_conductor_directional_albedo_texture_filename(ggx_dir_albedo_settings.masking_shadowing_term,\n\t\t\t\tggx_dir_albedo_settings.texture_size_cos_theta, \n\t\t\t\tggx_dir_albedo_settings.texture_size_roughness);\n\n\t\tstd::shared_ptr<GPUBaker> baker = m_render_window->get_baker();\n\n\t\tstatic bool bake_started_at_least_once = false;\n\t\tImGui::BeginDisabled(!baker->is_ggx_conductor_directional_albedo_bake_complete() && bake_started_at_least_once);\n\t\tif (ImGui::Button(\"Bake!\"))\n\t\t{\n\t\t\tbake_started_at_least_once = true;\n\t\t\t// This starts the baking job asynchronously and the texture is\n\t\t\t// automatically written to disk when the baking is done\n\t\t\tbaker->bake_ggx_conductor_directional_albedo(ggx_dir_albedo_settings, output_filename);\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\tstatic std::string baking_text = \"\";\n\t\tif (!baker->is_ggx_conductor_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking...\";\n\t\telse if (baker->is_ggx_conductor_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking complete!\";\n\n\t\tImGui::SameLine();\n\t\tImGui::Text(\"%s\", baking_text.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 
20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\n/**\n * Panel for the GGX fresnel directional albedo\n */\nvoid ImGuiToolsWindow::draw_GGX_fresnel()\n{\n\tif (ImGui::CollapsingHeader(\"GGX + Fresnel Directional Albedo\"))\n\t{\n\t\tImGui::TreePush(\"GGX_fresnel tree\");\n\n\t\tstatic GGXFresnelDirectionalAlbedoSettings ggx_fresnel_dir_albedo_settings;\n\n\t\tstatic bool filename_modified = false;\n\t\tstatic std::string output_filename;\n\n\t\tImGui::InputInt(\"Texture Size - Cos Theta\", &ggx_fresnel_dir_albedo_settings.texture_size_cos_theta);\n\t\tImGui::InputInt(\"Texture Size - Roughness\", &ggx_fresnel_dir_albedo_settings.texture_size_roughness);\n\t\tImGui::InputInt(\"Texture Size - IOR\", &ggx_fresnel_dir_albedo_settings.texture_size_ior);\n\t\tImGui::InputInt(\"Integration Sample Count\", &ggx_fresnel_dir_albedo_settings.integration_sample_count);\n\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\tImGui::Combo(\"GGX Masking-Shadowing\", (int*)&ggx_fresnel_dir_albedo_settings.masking_shadowing_term, masking_shadowing_items.data(), masking_shadowing_items.size());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::InputText(\"Output Texture Filename\", &output_filename))\n\t\t\tfilename_modified = true;\n\n\t\tif (!filename_modified)\n\t\t\t// As long as the user hasn't touched the output filename,\n\t\t\t// we modify it automatically so that's its more convenient\n\t\t\toutput_filename = GPUBakerConstants::get_GGX_fresnel_directional_albedo_texture_filename(ggx_fresnel_dir_albedo_settings.masking_shadowing_term,\n\t\t\t\tggx_fresnel_dir_albedo_settings.texture_size_cos_theta, \n\t\t\t\tggx_fresnel_dir_albedo_settings.texture_size_roughness, \n\t\t\t\tggx_fresnel_dir_albedo_settings.texture_size_ior);\n\n\t\tstd::shared_ptr<GPUBaker> baker = m_render_window->get_baker();\n\n\t\tstatic bool bake_started_at_least_once = 
false;\n\t\tImGui::BeginDisabled(!baker->is_ggx_fresnel_directional_albedo_bake_complete() && bake_started_at_least_once);\n\t\tif (ImGui::Button(\"Bake!\"))\n\t\t{\n\t\t\tbake_started_at_least_once = true;\n\t\t\t// This starts the baking job asynchronously and the texture is\n\t\t\t// automatically written to disk when the baking is done\n\t\t\tbaker->bake_ggx_fresnel_directional_albedo(ggx_fresnel_dir_albedo_settings, output_filename);\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\tstatic std::string baking_text = \"\";\n\t\tif (!baker->is_ggx_fresnel_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking...\";\n\t\telse if (baker->is_ggx_fresnel_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking complete!\";\n\n\t\tImGui::SameLine();\n\t\tImGui::Text(\"%s\", baking_text.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\n/**\n * Panel for the GGX directional albedo over the sphere for\n * glass material energy compensation\n */\nvoid ImGuiToolsWindow::draw_GGX_glass()\n{\n\tif (ImGui::CollapsingHeader(\"Glass Directional Albedo\"))\n\t{\n\t\tImGui::TreePush(\"GGX_E_glass tree\");\n\n\t\tstatic GGXGlassDirectionalAlbedoSettings ggx_glass_dir_albedo_settings;\n\n\t\tstatic bool filename_modified = false;\n\t\tstatic std::string output_filename;\n\n\t\tImGui::InputInt(\"Texture Size - Cos Theta\", &ggx_glass_dir_albedo_settings.texture_size_cos_theta_o);\n\t\tImGui::InputInt(\"Texture Size - Roughness\", &ggx_glass_dir_albedo_settings.texture_size_roughness);\n\t\tImGui::InputInt(\"Texture Size - IOR\", &ggx_glass_dir_albedo_settings.texture_size_ior);\n\t\tImGui::InputInt(\"Integration Sample Count\", &ggx_glass_dir_albedo_settings.integration_sample_count);\n\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\tImGui::Combo(\"GGX Masking-Shadowing\", 
(int*)&ggx_glass_dir_albedo_settings.masking_shadowing_term, masking_shadowing_items.data(), masking_shadowing_items.size());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::InputText(\"Output Texture Filename\", &output_filename))\n\t\t\tfilename_modified = true;\n\n\t\tif (!filename_modified)\n\t\t\t// As long as the user hasn't touched the output filename,\n\t\t\t// we modify it automatically so that's its more convenient\n\t\t\toutput_filename = GPUBakerConstants::get_GGX_glass_directional_albedo_texture_filename(ggx_glass_dir_albedo_settings.masking_shadowing_term,\n\t\t\t\tggx_glass_dir_albedo_settings.texture_size_cos_theta_o,\n\t\t\t\tggx_glass_dir_albedo_settings.texture_size_roughness,\n\t\t\t\tggx_glass_dir_albedo_settings.texture_size_ior);\n\n\t\tstd::shared_ptr<GPUBaker> baker = m_render_window->get_baker();\n\n\t\tstatic bool bake_started_at_least_once = false;\n\t\tImGui::BeginDisabled(!baker->is_ggx_glass_directional_albedo_bake_complete() && bake_started_at_least_once);\n\t\tif (ImGui::Button(\"Bake!\"))\n\t\t{\n\t\t\tbake_started_at_least_once = true;\n\t\t\t// This starts the baking job asynchronously and the texture is\n\t\t\t// automatically written to disk when the baking is done\n\t\t\tbaker->bake_ggx_glass_directional_albedo(ggx_glass_dir_albedo_settings, output_filename);\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\tstatic std::string baking_text = \"\";\n\t\tif (!baker->is_ggx_glass_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking...\";\n\t\telse if (baker->is_ggx_glass_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking complete!\";\n\n\t\tImGui::SameLine();\n\t\tImGui::Text(\"%s\", baking_text.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiToolsWindow::draw_GGX_thin_glass()\n{\n\tif (ImGui::CollapsingHeader(\"Thin Glass Directional Albedo\"))\n\t{\n\t\tImGui::TreePush(\"GGX_thin_glass 
tree\");\n\n\t\tstatic GGXThinGlassDirectionalAlbedoSettings ggx_thin_glass_dir_albedo_settings;\n\n\t\tstatic bool filename_modified = false;\n\t\tstatic std::string output_filename;\n\n\t\tImGui::InputInt(\"Texture Size - Cos Theta\", &ggx_thin_glass_dir_albedo_settings.texture_size_cos_theta_o);\n\t\tImGui::InputInt(\"Texture Size - Roughness\", &ggx_thin_glass_dir_albedo_settings.texture_size_roughness);\n\t\tImGui::InputInt(\"Texture Size - IOR\", &ggx_thin_glass_dir_albedo_settings.texture_size_ior);\n\t\tImGui::InputInt(\"Integration Sample Count\", &ggx_thin_glass_dir_albedo_settings.integration_sample_count);\n\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\tImGui::Combo(\"GGX Masking-Shadowing\", (int*)&ggx_thin_glass_dir_albedo_settings.masking_shadowing_term, masking_shadowing_items.data(), masking_shadowing_items.size());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::InputText(\"Output Texture Filename\", &output_filename))\n\t\t\tfilename_modified = true;\n\n\t\tif (!filename_modified)\n\t\t\t// As long as the user hasn't touched the output filename,\n\t\t\t// we modify it automatically so that it's more convenient\n\t\t\toutput_filename = GPUBakerConstants::get_GGX_thin_glass_directional_albedo_texture_filename(ggx_thin_glass_dir_albedo_settings.masking_shadowing_term,\n\t\t\t\tggx_thin_glass_dir_albedo_settings.texture_size_cos_theta_o,\n\t\t\t\tggx_thin_glass_dir_albedo_settings.texture_size_roughness,\n\t\t\t\tggx_thin_glass_dir_albedo_settings.texture_size_ior);\n\n\t\tstd::shared_ptr<GPUBaker> baker = m_render_window->get_baker();\n\n\t\tstatic bool bake_started_at_least_once = false;\n\t\tImGui::BeginDisabled(!baker->is_ggx_thin_glass_directional_albedo_bake_complete() && bake_started_at_least_once);\n\t\tif (ImGui::Button(\"Bake!\"))\n\t\t{\n\t\t\tbake_started_at_least_once = true;\n\t\t\t// This starts the baking job asynchronously and the texture 
is\n\t\t\t// automatically written to disk when the baking is done\n\t\t\tbaker->bake_ggx_thin_glass_directional_albedo(ggx_thin_glass_dir_albedo_settings, output_filename);\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\tstatic std::string baking_text = \"\";\n\t\tif (!baker->is_ggx_thin_glass_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking...\";\n\t\telse if (baker->is_ggx_thin_glass_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking complete!\";\n\n\t\tImGui::SameLine();\n\t\tImGui::Text(\"%s\", baking_text.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\n/**\n * Panel for the glossy dielectric directional albedo\n */\nvoid ImGuiToolsWindow::draw_glossy_dielectric()\n{\n\tif (ImGui::CollapsingHeader(\"Glossy Dielectric Directional Albedo\"))\n\t{\n\t\tImGui::TreePush(\"Glossy Dielectric tree\");\n\n\t\tstatic GlossyDielectricDirectionalAlbedoSettings glossy_dielectric_albedo_settings;\n\n\t\tstatic bool filename_modified = false;\n\t\tstatic std::string output_filename;\n\n\t\tImGui::InputInt(\"Texture Size - Cos Theta\", &glossy_dielectric_albedo_settings.texture_size_cos_theta_o);\n\t\tImGui::InputInt(\"Texture Size - Roughness\", &glossy_dielectric_albedo_settings.texture_size_roughness);\n\t\tImGui::InputInt(\"Texture Size - IOR\", &glossy_dielectric_albedo_settings.texture_size_ior);\n\t\tImGui::InputInt(\"Integration Sample Count\", &glossy_dielectric_albedo_settings.integration_sample_count);\n\t\tstd::vector<const char*> masking_shadowing_items = { \"- Smith height-correlated\", \"- Smith height-uncorrelated\" };\n\t\tImGui::Combo(\"GGX Masking-Shadowing\", (int*)&glossy_dielectric_albedo_settings.masking_shadowing_term, masking_shadowing_items.data(), masking_shadowing_items.size());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (ImGui::InputText(\"Output Texture Filename\", &output_filename))\n\t\t\tfilename_modified = 
true;\n\n\t\tif (!filename_modified)\n\t\t\t// As long as the user hasn't touched the output filename,\n\t\t\t// we modify it automatically so that's its more convenient\n\t\t\toutput_filename = GPUBakerConstants::get_glossy_dielectric_directional_albedo_texture_filename(glossy_dielectric_albedo_settings.masking_shadowing_term,\n\t\t\t\tglossy_dielectric_albedo_settings.texture_size_cos_theta_o,\n\t\t\t\tglossy_dielectric_albedo_settings.texture_size_roughness,\n\t\t\t\tglossy_dielectric_albedo_settings.texture_size_ior);\n\n\t\tstd::shared_ptr<GPUBaker> baker = m_render_window->get_baker();\n\n\t\tstatic bool bake_started_at_least_once = false;\n\t\tImGui::BeginDisabled(!baker->is_glossy_dielectric_directional_albedo_bake_complete() && bake_started_at_least_once);\n\t\tif (ImGui::Button(\"Bake!\"))\n\t\t{\n\t\t\tbake_started_at_least_once = true;\n\t\t\t// This starts the baking job asynchronously and the texture is\n\t\t\t// automatically written to disk when the baking is done\n\t\t\tbaker->bake_glossy_dielectric_directional_albedo(glossy_dielectric_albedo_settings, output_filename);\n\t\t}\n\t\tImGui::EndDisabled();\n\n\t\tstatic std::string baking_text = \"\";\n\t\tif (!baker->is_glossy_dielectric_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking...\";\n\t\telse if (baker->is_glossy_dielectric_directional_albedo_bake_complete() && bake_started_at_least_once)\n\t\t\tbaking_text = \" Baking complete!\";\n\n\t\tImGui::SameLine();\n\t\tImGui::Text(\"%s\", baking_text.c_str());\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::TreePop();\n\t}\n}\n\nvoid ImGuiToolsWindow::draw_image_difference_panel()\n{\n\tif (ImGui::CollapsingHeader(\"Image difference\"))\n\t{\n\t\tImGui::TreePush(\"Image difference tree\");\n\n\t\tconst char* filters[] = {\"*.png\", \"*.jpg\"};\n\n\t\tstatic float error_value = 1.0f;\n\t\tstatic std::string status_text = \"\";\n\t\tstatic std::string reference_image_path = \"\";\n\t\tstatic 
std::string subject_image_path = \"\";\n\n\t\tstatic Image32Bit reference_image;\n\t\tstatic Image32Bit subject_image;\n\n\t\tImGui::SeparatorText(\"Reference image\");\n\t\tif (ImGui::Button(\"Select reference image\"))\n\t\t{\n\t\t\treference_image_path = Utils::open_file_dialog(filters, 2);\n\t\t\treference_image = Image32Bit::read_image(reference_image_path, 3, false);\n\t\t}\n\t\tif (reference_image_path != \"\")\n\t\t{\n\t\t\tImGui::TreePush(\"Reference image text tree\");\n\t\t\t\n\t\t\tif (ImGui::Button(\"C\"))\n\t\t\t\tUtils::copy_image_to_clipboard(reference_image);\n\t\t\tImGuiRenderer::add_tooltip(\"Copies the image to the clipboard\");\n\t\t\tstd::string filename = std::filesystem::path(reference_image_path).filename().string();\n\t\t\tImGui::SameLine();  ImGui::Text(\"%s\", filename.c_str());\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tImGui::SeparatorText(\"Subject image\");\n\t\tstatic std::string subject_image_text = \"\";\n\t\tif (ImGui::Button(\"Select subject image\"))\n\t\t{\n\t\t\tsubject_image_path = Utils::open_file_dialog(filters, 2);\n\t\t\tsubject_image = Image32Bit::read_image(subject_image_path, 3, false);\n\n\t\t\tsubject_image_text = std::filesystem::path(subject_image_path).filename().string();\n\t\t}\n\t\tImGui::SameLine();\n\t\tif (ImGui::Button(\"Use viewport\"))\n\t\t{\n\t\t\tsubject_image = Image32Bit(m_render_window->get_screenshoter()->get_image(), 3);\n\t\t\tsubject_image_text = \"Viewport\";\n\t\t}\n\t\tif (subject_image_text != \"\")\n\t\t{\n\t\t\tImGui::TreePush(\"Subject image text tree\");\n\n\t\t\tif (ImGui::Button(\"C\"))\n\t\t\t\tUtils::copy_image_to_clipboard(subject_image);\n\t\t\tImGuiRenderer::add_tooltip(\"Copies the image to the clipboard\");\n\t\t\tImGui::SameLine();\n\t\t\tImGui::Text(\"%s\", subject_image_text.c_str());\n\n\t\t\tImGui::TreePop();\n\t\t}\n\n\t\tbool ready_to_compute = reference_image.width != 0 && subject_image.width != 0;\n\n\t\tImGui::Dummy(ImVec2(0.0f, 
20.0f));\n\t\tImGui::BeginDisabled(!ready_to_compute);\n\t\tif (ImGui::Button(\"Compute MSE\"))\n\t\t{\n\t\t\tif (subject_image_text == \"Viewport\")\n\t\t\t\t// Updating the subject image with the viewport\n\t\t\t\tsubject_image = Image32Bit(m_render_window->get_screenshoter()->get_image(), 3);\n\n\t\t\tif (reference_image.width != subject_image.width ||\n\t\t\t\treference_image.height != subject_image.height)\n\t\t\t{\n\t\t\t\tstatus_text = \"Error: Images must have the same dimensions!\";\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\terror_value = Utils::compute_image_mse(reference_image, subject_image);\n\n\t\t\t\tstatus_text = std::string(\"MSE: \" + std::to_string(error_value));\n\t\t\t}\n\t\t}\n\n\t\tif (ImGui::Button(\"Compute RMSE\"))\n\t\t{\n\t\t\tif (subject_image_text == \"Viewport\")\n\t\t\t\t// Updating the subject image with the viewport\n\t\t\t\tsubject_image = Image32Bit(m_render_window->get_screenshoter()->get_image(), 3);\n\n\t\t\tif (reference_image.width != subject_image.width ||\n\t\t\t\treference_image.height != subject_image.height)\n\t\t\t{\n\t\t\t\tstatus_text = \"Error: Images must have the same dimensions!\";\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\terror_value = Utils::compute_image_rmse(reference_image, subject_image);\n\n\t\t\t\tstatus_text = std::string(\"RMSE: \" + std::to_string(error_value));\n\t\t\t}\n\t\t}\n\n\t\tstatic bool output_flip_error_map = false;\n\t\tif (ImGui::Button(\"Compute FLIP\"))\n\t\t{\n\t\t\tif (subject_image_text == \"Viewport\")\n\t\t\t\t// Updating the subject image with the viewport\n\t\t\t\tsubject_image = Image32Bit(m_render_window->get_screenshoter()->get_image(), 3);\n\n\t\t\tif (reference_image.width != subject_image.width ||\n\t\t\t\treference_image.height != subject_image.height)\n\t\t\t{\n\t\t\t\tstatus_text = \"Error: Images must have the same dimensions!\";\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tfloat* error_map = nullptr;\n\t\t\t\terror_value = Utils::compute_image_weighted_median_FLIP(reference_image, 
subject_image, &error_map);\n\n\t\t\t\tif (output_flip_error_map)\n\t\t\t\t\t// Copy the error map to the clipboard\n\t\t\t\t\tUtils::copy_image_to_clipboard(Image32Bit(error_map, reference_image.width, reference_image.height, 3));\n\t\t\t\tfree(error_map);\n\n\t\t\t\tstatus_text = std::string(\"FLIP: \" + std::to_string(error_value));\n\t\t\t}\n\t\t}\n\t\tImGui::TreePush(\"Output FLIP error map tree\");\n\t\tImGui::Checkbox(\"Copy error map to clipboard\", &output_flip_error_map);\n\t\tImGui::TreePop();\n\t\tImGui::EndDisabled();\n\n\t\tImGui::Dummy(ImVec2(0.0f, 20.0f));\n\t\tif (status_text != \"\")\n\t\t{\n\t\t\tif (ImGui::Button(\"C\"))\n\t\t\t\tImGui::SetClipboardText(std::to_string(error_value).c_str());\n\n\t\t\tImGuiRenderer::show_help_marker(\"Copies the error value to the clipboard\");\n\t\t\tImGui::SameLine();\n\t\t}\n\t\tImGui::Text(\"%s\", status_text.c_str());\n\n\t\tImGui::TreePop();\n\t}\n}\n"
  },
  {
    "path": "src/UI/ImGui/ImGuiToolsWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef IMGUI_TOOLS_WINDOW_H\n#define IMGUI_TOOLS_WINDOW_H\n\n#include \"Renderer/GPURenderer.h\"\n\nclass RenderWindow;\n\nclass ImGuiToolsWindow\n{\npublic:\n\tstatic const char* TITLE;\n\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid draw();\n\tvoid draw_ggx_energy_compensation_panel();\n\tvoid draw_GGX_conductors();\n\tvoid draw_GGX_fresnel();\n\tvoid draw_GGX_glass();\n\tvoid draw_GGX_thin_glass();\n\tvoid draw_glossy_dielectric();\n\n\tvoid draw_image_difference_panel();\n\nprivate:\n\tRenderWindow* m_render_window = nullptr;\n\n\tstd::shared_ptr<GPURenderer> m_renderer;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/Interaction/LinuxRenderWindowMouseInteractor.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/Interaction/LinuxRenderWindowMouseInteractor.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"GLFW/glfw3.h\"\n\n#include \"imgui.h\"\n\nvoid LinuxRenderWindowMouseInteractor::glfw_mouse_button_callback(GLFWwindow* window, int button, int action, int mods)\n{\n    void* user_pointer = glfwGetWindowUserPointer(window);\n    RenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n\n    std::shared_ptr<RenderWindowMouseInteractor> interactor_instance_ptr = render_window->get_mouse_interactor();\n    LinuxRenderWindowMouseInteractor* interactor_instance = static_cast<LinuxRenderWindowMouseInteractor*>(interactor_instance_ptr.get());\n\n    // If it is the render window that is hovered, we're going to move the camera so we take\n    // the inputs\n    bool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n    bool imgui_wants_mouse = ImGui::GetIO().WantCaptureMouse && !render_window_hovered;\n\n    switch (button)\n    {\n    case GLFW_MOUSE_BUTTON_LEFT:\n        interactor_instance->set_interacting_left_button((action == GLFW_PRESS) && !imgui_wants_mouse);\n\n        break;\n\n    case GLFW_MOUSE_BUTTON_RIGHT:\n        interactor_instance->set_interacting_right_button((action == GLFW_PRESS) && !imgui_wants_mouse);\n\n        break;\n    }\n\n    if (action == GLFW_PRESS)\n        // If the user just clicked over the render window, setting the boolean to true, false otherwise\n        interactor_instance->render_window_hovered_on_click = render_window_hovered;\n    else if (action == GLFW_RELEASE)\n        interactor_instance->render_window_hovered_on_click = false;\n\n    bool interacting = interactor_instance->is_interacting();\n    if (interacting)\n        glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);\n    else\n        
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);\n}\n\nvoid LinuxRenderWindowMouseInteractor::glfw_mouse_cursor_callback(GLFWwindow* window, double xpos, double ypos)\n{\n    ImGuiIO& io = ImGui::GetIO();\n    void* user_pointer = glfwGetWindowUserPointer(window);\n\n    // If it is the render window that is hovered, we're going to move the camera so we take\n    // the inputs\n    RenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n    std::shared_ptr<RenderWindowMouseInteractor> interactor_instance_ptr = render_window->get_mouse_interactor();\n    LinuxRenderWindowMouseInteractor* interactor_instance = static_cast<LinuxRenderWindowMouseInteractor*>(interactor_instance_ptr.get());\n\n    // If the render window was hovered when the user clicked, then the user is trying to move the camera\n    bool render_window_hovered_when_clicked = interactor_instance->render_window_hovered_on_click;\n\n    bool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n    bool imgui_wants_mouse = io.WantCaptureMouse && !render_window_hovered && !render_window_hovered_when_clicked;\n\n    if (!imgui_wants_mouse)\n    {\n        float xposf = static_cast<float>(xpos);\n        float yposf = static_cast<float>(ypos);\n\n        if (render_window_hovered_when_clicked)\n        {\n            std::pair<float, float> old_position = render_window->get_cursor_position();\n            if (old_position.first == -1 && old_position.second == -1)\n                // If this is the first position of the cursor, nothing to do\n                ;\n            else\n            {\n                // Computing the difference in movement\n                std::pair<float, float> difference = std::make_pair(xposf - old_position.first, yposf - old_position.second);\n                static int counter = 0;\n\n                if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS)\n                    
render_window->update_renderer_view_translation(-difference.first, difference.second, true);\n\n                if (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS)\n                    render_window->update_renderer_view_rotation(-difference.first, -difference.second);\n            }\n\n        }\n\n        // Updating the position\n        render_window->set_cursor_position(std::make_pair(xposf, yposf));\n    }\n}\n\nvoid LinuxRenderWindowMouseInteractor::set_callbacks(GLFWwindow* window)\n{\n    glfwSetCursorPosCallback(window, LinuxRenderWindowMouseInteractor::glfw_mouse_cursor_callback);\n    glfwSetMouseButtonCallback(window, LinuxRenderWindowMouseInteractor::glfw_mouse_button_callback);\n\tglfwSetScrollCallback(window, RenderWindowMouseInteractor::glfw_mouse_scroll_callback);\n}"
  },
  {
    "path": "src/UI/Interaction/LinuxRenderWindowMouseInteractor.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef LINUX_RENDER_WINDOW_MOUSE_INTERACTOR_H\n#define LINUX_RENDER_WINDOW_MOUSE_INTERACTOR_H\n\n#include \"UI/Interaction/RenderWindowMouseInteractor.h\"\n\nstruct GLFWwindow;\n\nclass LinuxRenderWindowMouseInteractor : public RenderWindowMouseInteractor\n{\npublic:\n    void set_callbacks(GLFWwindow* window);\n\nprivate:\n    static void glfw_mouse_button_callback(GLFWwindow* window, int button, int action, int mods);\n    static void glfw_mouse_cursor_callback(GLFWwindow* window, double xpos, double ypos);\n\n    bool render_window_hovered_on_click = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/Interaction/RenderWindowKeyboardInteractor.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/RenderWindow.h\"\n#include \"UI/Interaction/RenderWindowKeyboardInteractor.h\"\n\nvoid RenderWindowKeyboardInteractor::glfw_key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)\n{\n\tvoid* user_pointer = glfwGetWindowUserPointer(window);\n\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n\n\t// We still want to process the inputs if we're hovering the render window because then\n\t// we *are* trying to move the camera with the keyboard\n\tbool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n\n\tImGuiIO& io = ImGui::GetIO();\n\tif (io.WantCaptureKeyboard && !render_window_hovered && !(action == GLFW_RELEASE))\n\t\t// We always want to handle release key otherwise we could press a key while\n\t\t// hovering the render window and then release the with our mouse over another window\n\t\t// --> not hovering the render window --> the key won't be released and the camera\n\t\t// will keep moving\n\t\treturn;\n\n\tRenderWindowKeyboardInteractor& interactor_instance = render_window->get_keyboard_interactor();\n\tswitch (key)\n\t{\n\tcase GLFW_KEY_W:\n\tcase GLFW_KEY_Z:\n\t\tinteractor_instance.m_z_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\t\tbreak;\n\n\tcase GLFW_KEY_A:\n\tcase GLFW_KEY_Q:\n\t\tinteractor_instance.m_q_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\t\tbreak;\n\n\tcase GLFW_KEY_S:\n\t\tinteractor_instance.m_s_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\n\t\tbreak;\n\n\tcase GLFW_KEY_D:\n\t\tinteractor_instance.m_d_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\t\tbreak;\n\n\tcase GLFW_KEY_SPACE:\n\t\tinteractor_instance.m_space_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\t\tbreak;\n\n\tcase 
GLFW_KEY_LEFT_SHIFT:\n\t\tinteractor_instance.m_lshift_pressed = (action == GLFW_PRESS) || (action == GLFW_REPEAT);\n\t\tbreak;\n\n\tdefault:\n\t\tbreak;\n\t}\n}\n\nvoid RenderWindowKeyboardInteractor::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = render_window;\n}\n\nvoid RenderWindowKeyboardInteractor::set_callbacks(GLFWwindow* window)\n{\n\tglfwSetKeyCallback(window, RenderWindowKeyboardInteractor::glfw_key_callback);\n}\n\nvoid RenderWindowKeyboardInteractor::poll_keyboard_inputs()\n{\n\tfloat zoom = 0.0f;\n\tstd::pair<float, float> translation = { 0.0f, 0.0f };\n\n\tif (m_z_pressed)\n\t\tzoom += 1.0f;\n\tif (m_q_pressed)\n\t\ttranslation.first += 1.0f;\n\tif (m_s_pressed)\n\t\tzoom -= 1.0f;\n\tif (m_d_pressed)\n\t\ttranslation.first -= 1.0f;\n\tif (m_space_pressed)\n\t\ttranslation.second += 1.0f;\n\tif (m_lshift_pressed)\n\t\ttranslation.second -= 1.0f;\n\n\tif (!(m_z_pressed || m_q_pressed || m_s_pressed || m_d_pressed || m_space_pressed || m_lshift_pressed))\n\t\t// Nothing to do\n\t\treturn;\n\n\tm_render_window->update_renderer_view_translation(-translation.first, translation.second, true);\n\tm_render_window->update_renderer_view_zoom(-zoom, true);\n}\n\nbool RenderWindowKeyboardInteractor::is_interacting()\n{\n\treturn m_z_pressed || m_q_pressed || m_s_pressed || m_d_pressed || m_space_pressed || m_lshift_pressed;\n}\n"
  },
  {
    "path": "src/UI/Interaction/RenderWindowKeyboardInteractor.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDER_WINDOW_KEYBOARD_INTERACTOR_H\n#define RENDER_WINDOW_KEYBOARD_INTERACTOR_H\n\nstruct GLFWwindow;\nclass RenderWindow;\n\nclass RenderWindowKeyboardInteractor\n{\npublic:\n\tvoid set_render_window(RenderWindow* render_window);\n\tvoid set_callbacks(GLFWwindow* window);\n\n\tstatic void glfw_key_callback(GLFWwindow* window, int key, int scancode, int action, int mods);\n\n\t/**\n\t * Looks at the key states of the interactor and manipulates\n\t * the queue_frame_for_render window to reflect on these pressed keys.\n\t */\n\tvoid poll_keyboard_inputs();\n\n\t/**\n\t * Returns true if any key (only keys relevant to this interactor) is currently being held down.\n\t * Returns false otherwise.\n\t */\n\tbool is_interacting();\n\nprotected:\n\tbool m_z_pressed = false;\n\tbool m_q_pressed = false;\n\tbool m_s_pressed = false;\n\tbool m_d_pressed = false;\n\tbool m_space_pressed = false;\n\tbool m_lshift_pressed = false;\n\n\tRenderWindow* m_render_window = nullptr;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/Interaction/RenderWindowMouseInteractor.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/Interaction/RenderWindowMouseInteractor.h\"\n#include \"UI/RenderWindow.h\"\n\nvoid RenderWindowMouseInteractor::glfw_mouse_scroll_callback(GLFWwindow* window, double xoffset, double yoffset)\n{\n    ImGuiIO& io = ImGui::GetIO();\n    void* user_pointer = glfwGetWindowUserPointer(window);\n\n    // If it is the render window that is hovered, we're going to move the camera so we take\n    // the inputs\n    RenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n    bool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n    bool imgui_want_mouse = io.WantCaptureMouse && !render_window_hovered;\n    if (!imgui_want_mouse)\n    {\n        RenderWindow* render_window = reinterpret_cast<RenderWindow*>(glfwGetWindowUserPointer(window));\n\n        yoffset = std::copysignf(1.0f, yoffset);\n\n        // Because the mouse scroll isn't a continuous input, we can't use the delta time of the application reliably\n        // to scale the speed of the zoom in the scene so we're hardcoding an arbitrary 12.0f here that proved to be\n        // okay good\n        render_window->update_renderer_view_zoom(static_cast<float>(-yoffset / 12.0f), false);\n    }\n}\n\nbool RenderWindowMouseInteractor::is_interacting()\n{\n    return  m_interacting_left_button || m_interacting_right_button;\n}\n\nvoid RenderWindowMouseInteractor::set_interacting_left_button(bool interacting)\n{\n    m_interacting_left_button = interacting;\n}\n\nvoid RenderWindowMouseInteractor::set_interacting_right_button(bool interacting)\n{\n    m_interacting_right_button = interacting;\n}\n\nbool RenderWindowMouseInteractor::is_interacting_right_button()\n{\n    return m_interacting_right_button;\n}\n\nbool RenderWindowMouseInteractor::is_interacting_left_button()\n{\n    return 
m_interacting_left_button;\n}\n"
  },
  {
    "path": "src/UI/Interaction/RenderWindowMouseInteractor.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDER_WINDOW_MOUSE_INTERACTOR_H\n#define RENDER_WINDOW_MOUSE_INTERACTOR_H\n\nstruct GLFWwindow;\n\n/**\n * This class is derived in a LinuxRenderWindowMouseInteractor and a WindowsRenderWindowMouseInteractor.\n * \n * This is because GLFW_CURSOR_DISABLED seems to be buggued on Windows. There's some kind of annoying \n * cursor jumping happening. \n * \n * Therefore, a \"custom\" solution that forcefully repositions the cursor at its previous position \n * has been implemented to provide unlimited movement (otherwise the cursor would end up getting \n * out of the window). On Linux, it seemed that glfwSetCursorPosition had no effect (my cursor \n * wasn't being repositionned at all during my testing). However, on Linux, there's no cursor \n * jumping with GLFW_CURSOR_DISABLED so we can use that (using the same implementation as Windows \n * is broken anyways because, again, glfwSetCursorPosition, which is used by the Windows implementation, \n * has no effect). 
\n * \n * This is why we have 2 different implementations:\n * \n *  - Windows uses a replacement implementation to GLFW_CURSOR_DISABLED that manually\n *      repositions the cursor using glfwSetCursorPosition\n *  - Linux uses GLFW_CURSOR_DISABLED (we could have used only the Windows implementation but\n *      glfwSetCursorPosition doesn't seemed to be working during my testing on Linux)\n */\nclass RenderWindowMouseInteractor\n{\npublic:\n    virtual void set_callbacks(GLFWwindow* window) {}\n\n    /**\n     * Returns true if either to left mouse button or the right\n     * mouse button is currently held down\n     */\n    bool is_interacting();\n\n    void set_interacting_left_button(bool interacting); \n    void set_interacting_right_button(bool interacting);\n\n    bool is_interacting_right_button();\n    bool is_interacting_left_button();\n\nprotected:\n    static void glfw_mouse_scroll_callback(GLFWwindow* window, double xoffset, double yoffset);\n\nprivate:\n    // Is the mouse left button beind held down?\n    bool m_interacting_left_button = false;\n    // Is the mouse right button beind held down?\n    bool m_interacting_right_button = false;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/Interaction/WindowsRenderWindowMouseInteractor.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"UI/Interaction/WindowsRenderWindowMouseInteractor.h\"\n#include \"UI/RenderWindow.h\"\n\n#include \"GLFW/glfw3.h\"\n#include \"imgui.h\"\n\nvoid WindowsRenderWindowMouseInteractor::glfw_mouse_button_callback(GLFWwindow* window, int button, int action, int mods)\n{\n\tvoid* user_pointer = glfwGetWindowUserPointer(window);\n\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n\n\tstd::shared_ptr<RenderWindowMouseInteractor> interactor_instance = render_window->get_mouse_interactor();\n\tstd::shared_ptr<WindowsRenderWindowMouseInteractor> windows_interactor = std::dynamic_pointer_cast<WindowsRenderWindowMouseInteractor>(interactor_instance);\n\n\t// If it is the render window that is hovered, we're going to move the camera so we take\n\t// the inputs\n\tbool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n\tbool imgui_wants_mouse = ImGui::GetIO().WantCaptureMouse && !render_window_hovered;\n\n\tswitch (button)\n\t{\n\tcase GLFW_MOUSE_BUTTON_LEFT:\n\t\tinteractor_instance->set_interacting_left_button((action == GLFW_PRESS) && !imgui_wants_mouse);\n\n\t\tbreak;\n\n\tcase GLFW_MOUSE_BUTTON_RIGHT:\n\t\tinteractor_instance->set_interacting_right_button((action == GLFW_PRESS) && !imgui_wants_mouse);\n\n\t\tbreak;\n\t}\n\n\tbool is_mouse_pressed = interactor_instance->is_interacting();\n\tif (is_mouse_pressed)\n\t{\n\t\tdouble current_x, current_y;\n\t\tglfwGetCursorPos(window, &current_x, &current_y);\n\t\twindows_interactor->m_grab_cursor_position = std::make_pair(static_cast<float>(current_x), static_cast<float>(current_y));\n\n\t\twindows_interactor->m_just_pressed = true;\n\t}\n\telse\n\t\twindows_interactor->m_just_pressed = false;\n}\n\nvoid WindowsRenderWindowMouseInteractor::glfw_mouse_cursor_callback(GLFWwindow* window, double 
xpos, double ypos)\n{\n\tvoid* user_pointer = glfwGetWindowUserPointer(window);\n\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(user_pointer);\n\n\tstd::shared_ptr<RenderWindowMouseInteractor> interactor_instance = render_window->get_mouse_interactor();\n\tstd::shared_ptr<WindowsRenderWindowMouseInteractor> windows_interactor = std::dynamic_pointer_cast<WindowsRenderWindowMouseInteractor>(interactor_instance);\n\n\tImGuiIO& io = ImGui::GetIO();\n\n\t// If it is the render window that is hovered, we're going to move the camera so we take\n\t// the inputs\n\tbool render_window_hovered = render_window->get_imgui_renderer()->get_imgui_render_window().is_hovered();\n\tbool imgui_want_mouse = io.WantCaptureMouse && !render_window_hovered;\n\tif (!imgui_want_mouse)\n\t{\n\t\tif (windows_interactor->m_just_pressed)\n\t\t{\n\t\t\t// We want to skip the frame where the mouse is being repositioned to\n\t\t\t// the center of the screen because if the cursor wasn't at the center,\n\t\t\t// we're going to consider to delta from the old position to the center as\n\t\t\t// the moving having moved but it's not the case. 
The user didn't move the\n\t\t\t// mouse, it's us forcing it in the center of the viewport\n\t\t\twindows_interactor->m_just_pressed = false;\n\n\t\t\treturn;\n\t\t}\n\n\t\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(glfwGetWindowUserPointer(window));\n\n\t\tfloat xposf = static_cast<float>(xpos);\n\t\tfloat yposf = static_cast<float>(ypos);\n\n\t\tstd::pair<float, float> grab_position = windows_interactor->m_grab_cursor_position;\n\t\tif (grab_position.first == -1 && grab_position.second == -1)\n\t\t\t// If this is the first position of the cursor, nothing to do\n\t\t\t;\n\t\telse\n\t\t{\n\t\t\t// Computing the difference in movement\n\t\t\tstd::pair<float, float> difference = std::make_pair(xposf - grab_position.first, yposf - grab_position.second);\n\n\t\t\tif (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_RIGHT) == GLFW_PRESS)\n\t\t\t\trender_window->update_renderer_view_translation(-difference.first / 300.0f, difference.second / 300.0f, false);\n\n\t\t\tif (glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT) == GLFW_PRESS)\n\t\t\t\trender_window->update_renderer_view_rotation(-difference.first, -difference.second);\n\t\t}\n\n\t\t// Updating the position\n\t\tif (interactor_instance->is_interacting())\n\t\t\t// Locking the cursor in place as long as we're moving the camera\n\t\t\tglfwSetCursorPos(window, grab_position.first, grab_position.second);\n\t}\n}\n\nvoid WindowsRenderWindowMouseInteractor::set_callbacks(GLFWwindow* window)\n{\n    glfwSetCursorPosCallback(window, WindowsRenderWindowMouseInteractor::glfw_mouse_cursor_callback);\n    glfwSetMouseButtonCallback(window, WindowsRenderWindowMouseInteractor::glfw_mouse_button_callback);\n\tglfwSetScrollCallback(window, RenderWindowMouseInteractor::glfw_mouse_scroll_callback);\n}\n"
  },
  {
    "path": "src/UI/Interaction/WindowsRenderWindowMouseInteractor.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef WINDOWS_RENDER_WINDOW_MOUSE_INTERACTOR_H\n#define WINDOWS_RENDER_WINDOW_MOUSE_INTERACTOR_H\n\n#include \"UI/Interaction/RenderWindowMouseInteractor.h\"\n\n#include <utility>\n\nstruct GLFWwindow;\n\nclass WindowsRenderWindowMouseInteractor : public RenderWindowMouseInteractor\n{\npublic:\n    void set_callbacks(GLFWwindow* window);\n\nprotected:\n    bool m_just_pressed = false;\n\nprivate:\n    static void glfw_mouse_button_callback(GLFWwindow* window, int button, int action, int mods);\n    static void glfw_mouse_cursor_callback(GLFWwindow* window, double xpos, double ypos);\n\n    // Position of the mouse when the user first clicked the viewport.\n    // Used to put the cursor back in place to allow infinite mouse movements\n    std::pair<float, float> m_grab_cursor_position = { 0.0f, 0.0f };\n\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/PerformanceMetricsComputer.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"PerformanceMetricsComputer.h\"\n\n#include <cmath>\n#include <iostream>\n\nfloat PerformanceMetricsComputer::data_getter(void* data, int index)\n{\n\treturn static_cast<double*>(data)[index];\n}\n\nvoid PerformanceMetricsComputer::init_key(const std::string& key)\n{\n\tif (m_key_init.find(key) == m_key_init.end())\n\t{\n\t\t// Key data not init yet\n\t\tm_key_init[key] = true;\n\n\t\tm_values[key] = std::vector<double>(m_window_size, 0.0f);\n\t\tm_values_count[key] = 0;\n\t\tm_values_sum[key] = 0.0;\n\t\tm_values_sum_of_squares[key] = 0.0;\n\t\tm_data_indices[key] = 0;\n\t\tm_min_max_data[key] = std::multiset<double>();\n\t}\n}\n\nstd::vector<double>& PerformanceMetricsComputer::get_data(const std::string& key)\n{\n\tinit_key(key);\n\n\treturn m_values.find(key)->second;\n}\n\nint PerformanceMetricsComputer::get_value_count(const std::string& key)\n{\n\treturn m_values_count[key];\n}\n\nint PerformanceMetricsComputer::get_data_index(const std::string& key)\n{\n\treturn m_data_indices[key];\n}\n\nvoid PerformanceMetricsComputer::add_value(const std::string& key, double new_value)\n{\n\tinit_key(key);\n\n\t// Where are we going to insert the next value in the m_values vector\n\tunsigned int next_index = m_data_indices[key];\n\tm_data_indices[key] = m_data_indices[key] + 1;\n\tif (m_data_indices[key] == m_window_size)\n\t\tm_data_indices[key] = 0;\n\n\tdouble removed_value = m_values[key].at(next_index);\n\tm_values[key].at(next_index) = new_value;\n\n\t// Whether or not we've reached the maximum number of values we can\n\t// store. 
If true, we are now removing a value every single time we want to insert one\n\tbool at_capacity = false;\n\tint& current_value_count = m_values_count[key];\n\tif (current_value_count < m_window_size && next_index < current_value_count)\n\t\t// This is a special case when we just resized the window to \n\t\t// a size larger than the previous one. \n\t\t// \n\t\t// This can cause issues in the following situation:\n\t\t//\n\t\t//  - The window size is 100. We have input 180 values so far. \n\t\t//\t\tThis means that we're at capacity and we've stared from the beginning, \n\t\t//\t\toverriding the first 80 values\n\t\t//\t- The window is resized to a size of 250\n\t\t//\t- We keep adding new values and we are currently at the value 230.\n\t\t//\t- Without this if() statement right here, we would have m_values_count = 250, not 230\n\t\t//\t\tbecause we started incrementing m_values_count[key] right when we resized the window\n\t\t//\t\t(when we were overriding the 80th value). We counted 20 values too many between the 80th\n\t\t//\t\tand the 100th.\n\t\t//\t\tWe're now \"at_capacity\" at value 230 instead of at value 250 and that causes issues in the\n\t\t//\t\trest of the perf metrics computer so this if() statement here prevents incrementing m_values_count\n\t\t//\t\t\"falsely\"\n\t\tat_capacity = false;\n\telse\n\t\tat_capacity = ++m_values_count[key] > m_window_size;\n\tm_values_count[key] -= at_capacity;\n\n\t// Updating the sums and sums of squares according to the value we removed / added\n\tm_values_sum[key] -= removed_value;\n\tm_values_sum[key] += new_value;\n\tm_values_sum_of_squares[key] -= removed_value * removed_value;\n\tm_values_sum_of_squares[key] += new_value * new_value;\n\n\tauto& multiset = m_min_max_data.find(key)->second;\n\tif (at_capacity)\n\t{\n\t\tauto position = multiset.find(removed_value);\n\t\tmultiset.erase(position);\n\t}\n\tmultiset.insert(new_value);\n}\n\ndouble PerformanceMetricsComputer::get_current_value(const std::string& 
key)\n{\n\tif (m_values_count[key] == 0)\n\t\treturn 0.0f;\n\n\t// m_data_indices[key] is the index of the value that we're going to insert next\n\t// but we want the index of the value last inserted so we -1 that value\n\tint current_index = m_data_indices[key];\n\tint previous_index = current_index - 1;\n\tif (previous_index == -1)\n\t\tprevious_index = m_values_count[key] - 1;\n\n\treturn m_values[key][previous_index];\n}\n\ndouble PerformanceMetricsComputer::get_average(const std::string& key)\n{\n\tif (m_values_count[key] == 0)\n\t\treturn -1.0;\n\n\treturn m_values_sum[key] / m_values_count[key];\n}\n\ndouble PerformanceMetricsComputer::get_variance(const std::string& key)\n{\n\tif (m_values_count[key] == 0)\n\t\treturn -1.0;\n\n\tdouble average = get_average(key);\n\treturn m_values_sum_of_squares[key] / m_values_count[key] - average * average;\n}\n\ndouble PerformanceMetricsComputer::get_standard_deviation(const std::string& key)\n{\n\tif (m_values_count[key] == 0)\n\t\treturn -1.0;\n\n\treturn std::sqrt(get_variance(key));\n}\n\ndouble PerformanceMetricsComputer::get_min(const std::string& key)\n{\n\tif (m_min_max_data[key].size() == 0)\n\t\treturn -1.0;\n\n\treturn *m_min_max_data[key].begin();\n}\n\ndouble PerformanceMetricsComputer::get_max(const std::string& key)\n{\n\tif (m_min_max_data[key].size() == 0)\n\t\treturn -1.0;\n\n\t// rbegin() is the last element\n\t// end() would be past the last element so we're not using end() here\n\treturn *m_min_max_data[key].rbegin();\n}\n\nint PerformanceMetricsComputer::get_window_size() const\n{\n\treturn m_window_size;\n}\n\nint& PerformanceMetricsComputer::get_window_size()\n{\n\treturn m_window_size;\n}\n\nvoid PerformanceMetricsComputer::resize_window(int new_size)\n{\n\tif (m_window_size == new_size)\n\t\treturn;\n\n\tresize_values_vectors(new_size);\n\trecompute_data(new_size);\n\n\tm_window_size = new_size;\n}\n\nvoid PerformanceMetricsComputer::resize_values_vectors(int new_size)\n{\n\tfor (auto& pair_kv 
: m_values)\n\t\t// Resizing the values vectors.\n\t\t// If resizing to a smaller size, .resize() throws away the elements at the end.\n\t\t// If resizing to a greater size, .resize() inserts new element at the end.\n\t\t// This is what we want.\n\t\tpair_kv.second.resize(new_size, 0.0);\n}\n\nvoid PerformanceMetricsComputer::recompute_data(int new_size)\n{\n\t// This function could be recomputing elements in a smarter way but because recompute_data\n\t// isn't expected to be called that often at all, let's keep it simple\n\n\tif (new_size > m_window_size)\n\t\t// Nothing to recompute, new elements will be added later\n\t\treturn;\n\n\t// Else, elements were removed from the end, we need to recompute the sum,\n\t// sums of squares, ... without taking these removed elements into account\n\n\tfor (auto pair_kv : m_values)\n\t{\n\t\tconst std::string& key = pair_kv.first;\n\n\t\tif (m_values_count[key] < new_size)\n\t\t{\n\t\t\t// There wasn't enough values so resizing the vector didn't remove any value,\n\t\t\t// nothing to recompute\n\n\t\t\tcontinue;\n\t\t}\n\n\t\tm_values_count[key] = std::min(m_values_count[key], new_size);\n\t\tm_values_sum[key] = 0.0;\n\t\tm_values_sum_of_squares[key] = 0.0;\n\t\tm_data_indices[key] = m_data_indices[key] > new_size ? 0 : m_data_indices[key];\n\t\tm_min_max_data[key].clear();\n\n\t\tstd::vector<double>& values = pair_kv.second;\n\t\tfor (int i = 0; i < new_size; i++)\n\t\t{\n\t\t\tdouble value = values[i];\n\n\t\t\tm_min_max_data[key].insert(value);\n\t\t\tm_values_sum[key] += value;\n\t\t\tm_values_sum_of_squares[key] += value * value;\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "src/UI/PerformanceMetricsComputer.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef PERFORMANCE_METRICS_COMPUTER\n#define PERFORMANCE_METRICS_COMPUTER\n\n#include <set>\n#include <string>\n#include <unordered_map>\n#include <vector>\n\nclass PerformanceMetricsComputer\n{\npublic:\n\tstatic float data_getter(void* data, int index);\n\n\tvoid init_key(const std::string& key);\n\tstd::vector<double>& get_data(const std::string& key);\n\tint get_value_count(const std::string& key);\n\tint get_data_index(const std::string& key);\n\n\tvoid add_value(const std::string& key, double value);\n\n\tdouble get_current_value(const std::string& key);\n\tdouble get_average(const std::string& key);\n\tdouble get_variance(const std::string& key);\n\tdouble get_standard_deviation(const std::string& key);\n\tdouble get_min(const std::string& key);\n\tdouble get_max(const std::string& key);\n\n\tint get_window_size() const;\n\tint& get_window_size();\n\tvoid resize_window(int new_size);\n\nprivate:\n\tvoid resize_values_vectors(int new_size);\n\n\t/**\n\t * This function is called when resizing the window. \n\t * Because we have thrown away elements that were at the end of the vectors,\n\t * we're going to have to recompute the average, sums, ... not to take into account\n\t * the elements that were removed\n\t */\n\tvoid recompute_data(int new_size);\n\n\tint m_window_size = 250;\n\n\t// Whether or not we have already initialized all the unordered maps for a given key\n\tstd::unordered_map<std::string, bool> m_key_init;\n\t// The values for the given key. 
There are m_values_count[key] valid values\n\t// in the vector at the given key\n\tstd::unordered_map<std::string, std::vector<double>> m_values;\n\t// How many valid values are present in m_values from the beginning of its vector<double>\n\tstd::unordered_map<std::string, int> m_values_count;\n\t// Sum of the last m_window_size values\n\tstd::unordered_map<std::string, double> m_values_sum;\n\t// Sum of the last m_window_size values squared\n\tstd::unordered_map<std::string, double> m_values_sum_of_squares;\n\t// Where is the next value going to be inserted in m_values\n\tstd::unordered_map<std::string, unsigned int> m_data_indices;\n\t// Using a multiset here allows to easily retrieve the minimum and maximum of the values\n\tstd::unordered_map<std::string, std::multiset<double>> m_min_max_data;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/RenderWindow.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Compiler/GPUKernelCompiler.h\"\n#include \"Scene/SceneParser.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"tracy/TracyOpenGL.hpp\"\n#include \"UI/RenderWindow.h\"\n#include \"UI/Interaction/LinuxRenderWindowMouseInteractor.h\"\n#include \"UI/Interaction/WindowsRenderWindowMouseInteractor.h\"\n#include \"Utils/Utils.h\"\n\n#include <functional>\n#include <iostream>\n\n#include \"stb_image_write.h\"\n\n// - try simplifying the material to just a diffuse component to see if that helps memory accesses --> 8/10%\n// - try removing everything about nested dielectrics to see the register/spilling usage and performance --> ~1/2%\n\n\n// GPUKernelCompiler for waiting on threads currently reading files on disk\nextern GPUKernelCompiler g_gpu_kernel_compiler;\nextern ImGuiLogger g_imgui_logger;\n\n// TODO still some config with envmap sampling that doesn't quite match the reference when playing with all the ReGIR / ReSTIR DI settings\n\n\n// TODO to mix microfacet regularization & BSDF MIS RAY reuse, we can check if we regularized hard or not. If the regularization roughness difference is large, let's not reuse the ray as this may roughen glossy objects. Otherwise, we can reuse\n// - Test ReSTIR GI with diffuse transmission\n// - We don't have to store the ReSTIR **samples** in the spatial pass. 
We can just store a pixel index and then on the next pass, when we need the sample, we can use that pixel index to go fetch the sample at the right pixel\n// - distance rejection heuristic for GI reconnection\n// - Alpah tests darkening ReSTIR DI\n// - ReSTIR DI + the-white-room.gltf + CPU (opti on) + no debug + no envmap ---> denormalized check triggered\n\n// TODO ReSTIR\n// - We shouldn't shoot a shadow ray in the light evaluation if the BSDF sample was chosen because this already has visibility\n// - Can we do something for restir that has a hash grid for the first hits of the rays and then for spatial reuse, each pixel looks up its cell and reuse paths from the same cell (and thus same geometry if we include the normals in the hash grid). This would basically be a more accurate version of the directional spatial reuse\n//\t\t- One issue that we're going to have is: for a given pixel, we can compute it hash cell but then how do we know which other reservoirs (neighbors) are in the same hash cell?\n//\t\t\t- Fix that by: counting how different hash cell the primary hits create\n//\t\t\t- Create one counter per hash cell\n//\t\t\t- Count how many pixels fall in a given hash cell\n//\t\t\t4. Index the hash cell from 0 to N-1 where N is the number of hash cells\n//\t\t\t- Then we can have a pass that assigns each pixel index to a hash cell:\n//\t\t\t\t- For each pixel, find its hash cell. 
From the hash cell index of step 4., we know where in the fullscreen-wide buffer we need to write the pixel index by using the prefix sum of the hash cell counters up til the current hash cell index\n//\t\t\t- Once that's done, we know for a given pixel how many valid neighbors there are and what's their pixel indices\n// \n// - For the spatial reuse buffer, we don't have to store a whole grid at all, we can just store the index of the cell the reservoir reused from --> massive VRAM saves\n// - Using the indirect index for the spatial output buffer, can we double buffer the initial candidates grid and run the spatial reuse of ReGIR async of the path tracing too?\n// - There is bias in ReSTIR DI\n// - Greedy spatial reuse to retry neighbors if we didn't get a good one\n//\t\t\tFor the greedy neighbor search of restir spatial reuse, maybe reduce progressively the radius ?\n// - memory coalescing aware spatial reuse pattern --> per warp / per half warp to reduce correlation artifacts?\n// - can we maybe stop ReSTIR GI from resampling specular lobe samples? Since it's bound to fail anwyays. And do not resample on glass\n// - See how many pixels of ReSTIR GI end up with the initial candidate as the final sample --> we can reuse NEE at the first hit for those samples in the shading pass instead of recomputing NEE\n// - BSDF MIS Reuse for ReSTIR DI\n// - Force albedo to white for spatial reuse? Because what's interesting to reuse is the shape of the BRDF and the incident radiance. Resampling from a black diffuse is still interesting. 
The albedo doesn't matter\n// - Have a look at compute usage with the profiler with only a camera ray kernel and more and more of the code to see what's dropping the compute usage \n// - If it is the canonical sample that was resampled in ReSTIR GI, recomputing direct lighting at the sample point isn't needed and could be stored in the reservoir?\n\n// TODO ReGIR\n// - Can we group light triangles by their meshes and pre-compute a CDF per each grid cell for which meshes are the best one for that grid cell.\n//\t\t- We would then use that CDF during the grid fill to resample a good mesh and then resample a good triangle in that mesh\n//\t\t- We'er going to have a similar \"CDF per cell\" situation as \"Cache Points\" from Disney some maybe there are going to be some ideas to pick from their CDF blending / visibility integration etc...\n// - Store target function in reservoir to avoid recomputing it during pairwise MIS shading resampling?\n// - Stochastic light culling harada et al to improve base candidate sampling\n// - Pixel deinterleaving for reducing correlations in light presampling ? Segovia 2006\n// - Can we have a lightweight light rejection (russian roulette) method in the grid fill? So even if we have 32 candidates per grid fill cell, if a light is evidently too far away to contribute, we can reject it and not count that as a try from our 32 tries. The rejection test needs to be lightweight such that it is significantly less expensive than doing a full candidate\n//\t\t- To make the rejection lightweight, can we have a luminance of emission baked into our materials such that we can simply check the luminance of the light (one float fetch) instead of the full RGB color which would be 3 float fetchtes. 
Maybe that would be faster\n// - Increased the number of shading retries?\n// - NEE++ compaction: we only need uchar per value, not uint\n// - Can we shade only the 4 non canonical neighbors + the final reservoir instead of shading everyone?\n//\t\t-  For the MIS weights, we can use the unnormalized target functions for everyone and it should be fine?\n// - For interacting with ReGIR, we can probably just shade with non canonical candidates and that's it. It will be biased but at least it won't be disgusting because of the lack of pre integration information\n// - Can we shade multiple reservoirs without shooting shadow rays by using NEE++ to make sure that the reservoir isn't shadowed? This may be biased but maybe not too bad?\n// - Can we have a biased NEE++ where we clamp the normalization factor to avoid fireflies?\n// - Can we evaluate the ratio between the UCW and the final contribution? If the ratio is higher than a threshold then that's an outlier / Firefly and we may want to skip it attenuate it\n// - Can we do many many more samples per each reservoir during the pre integration pass (and thus have less reservoirs per cell) to improve the quality of the integral estimate with less reservoirs and less integration iterations?\n// - Spatial reuse seems to introduce quite a bit of correlations so we would be better off improving the base sampling to not have to rely on spatial reuse for good samples quality\n// - NEE++ maximum load factor to avoid the hash grid being totally filled and performance dying because of that\n// - Can we randomize the hash of grid cells to avoid correlations? Basically subdivide each grid cell into 2/3/4/... grid cells and randomly assign the space of the main grid cell to either 1/2/3/... of the sub such that correlation aretifacts are basically randomized and do not look bad\n// - Can we compute the \"gradient\" of cell occupancy of the grid to adjust the factor by which we resize the grid every time? 
To avoid overshooting too much and having a resized grid that is too large\n// - Can we just use the 32 reservoirs for shading as the input to the pre integration process? Is that enough for an accurate integral estimate?\n// - Maybe not having the spatial reuse in the pre integration is ok still for normalization factor\n// - No need to read random reservoirs in the pre integration kernel, we can just read the reservoirs one by one of each grid cell and integrate them all. \n//\t\t- Opens up possibilities for coalescing the reads of the reservoirs in the pre integration kernel\n// - Super large resolution on surfaces that do not allow light sampling for the hash grid since we do not need ReGIR here\n// - We need a special path for ReGIR, hard to use as a light sampling plug in, lots of opti to do with a special path\n// - Variable jitter radius basezd on cell size\n// - Include normal in hash grid for low roughness surfaces to have better BRDF sampling precision\n// - Decoupled shading and reuse ReGIR: add visibility rays during the shading so that we have visiblity resampling which is very good and on top of that, we can totally shade the reservoir because the visibility has been computed so the rest of the shading isn't super expensive: maybe use NEE++ in there to reduce shadow rays? Or the visibility caching thing that is biased?\n// - Can we maybe add BRDF samples in the grid fill for rough BRDFs? This will enable perfect MIS for diffuse BRDFs which should be good for the bistro many light with the light close for example. 
This could also be enough for rough-ish specular BRDFs\n//\t\t- We can probably trace the BRDF rays in a light-only BVH here and then if an intersection point is found, use NEE++ visibility estimation there\n//\t\t- Maybe have some form of roughness threshold when using ReGIR with MIS to use MIS only on specular surfaces where the grid fill BRDF rays didn't help\n// - Only need 1 bit per cell here for 'grid cells alive': whether or not a given grid cell is alive\n// - Quantize ahsh grid cell data .sum_points: we don't need the precision since this is just an average for getting an approximate center of cell\n// - Light to light grid cells should be cached in the same hash cell entry\n// - Reintroduce temporal reuse but maybe with a small M-cap, should be worth it on difficult scenes, the many lights bistro for example\n// - Limit the grid cell life length of NEE++ if it hasn't been hit in a long time\n// - Limit the grid cell life length of ReGIR if it hasn't been hit in a long time\n// - Multiple spatial reuse passes\n// - We can deallocate the emissive triangle index of the ReGIR reservoir if not using ReSTIR DI\n// - Should we have something to limit the life length of an NEE++ grid cell? So that we can remove cells unused and keep the grid size in check\n// - Trry to disable canonical and see if it converges quicker\n//\t\t- It does -----> We need to find some better MIS weights for the canonical sample\n//\t\t- Try to downweigjt canonical MIS weight instead of 1 / M\n// - Interrupt target function evaluation in ReGIR if the cosine term drops to zero such that we don't fill the NEE hash grid if the light is back facing for example\n// - Lambertian BRDF goes through lampshade in white room but principled BSDF doesn't\n// - Can we keep the grid of reservoirs from the last frame to pick them during shading to reduce correlations? 
Only memory cost but it's ok\n//\t\t- Maybe only that for primary hit reservoirs because those are the only one to be correlated hard?\n// - Have a variable radius when picking reservoirs for shading \n// - Issue with microfacet regularization x ReGIR?\n// - Scalarization of hash grid fill because we know that consecutive threads are in the same cell\n// - Scalarization of the hash grid fetches for the camera rays?\n// - We can optimize the grid cell aliv ecounter atomic increment by incrementing by the number of threads in the wavefront instead of 1 per thread\n// - Deduplicate hash grid cell idnex calculations in fetch reservoirs functions mainly for performance reasons\n// - To profile the hash grid, may be useful to, for example, store everything from the camera rays pass into some buffers and then run a separate hash grid cell data fill kernel just to be able to profile that kernel in isolation\n// - For the spatial reuse output grid buffer, we don't have to store the rservoirs, we can just store the indices of the cell which we resample from so let's save some VRAM there\n// - Can we store just the light index per each regir sample? And reconstruct, the normal and everything from that? Maybe that's not going to be much more expensive that having to read everything from the Regir sample but this would save a lot of memory\n// - Directional spatial reuse to directly hit the right neighbors instead of having to retry multiple times (one memory access for each retry)\n// - Do we have bad divergence when ReGIR falls back to power sampling? 
Maybe we could retry more and more ReGIR until we find a reservoir to avoid the divergence\n// - If we want initial visibility in ReGIR, we're going to have to check whether the center of the cell is in an object or not because otherwise, all the samples for that cell are going to be occluded and that's going to be biased if a surface goes through that cell\n// - Use some shjortcut in the BSDF in the target function during shading: rough material only use a constant BSDF, nothing more\n// - When computing the MIS weights by counting the neighbors, we actually don't need the full target function with the emission and everything, we just need the cosine term and shadow ray probably\n// - De-duplicate BSDF computations during shading: we evaluate the BRDF during the reservoir resampling and again during the light sampling\n//\t\tMay be exclusive with the BSDF simplifications that can be done in the target function because then we wouldn't be evaluating the proper full BSDF in the target function\n// - Can we have some kind of visibility percentage grid that we can use during the resampling to help with visibility noise? \n//\t\t- We would have a voxel grid on top of the ReGIR grid. 
\n//\t\t- That grid would contain as many floats per cell as there are reservoirs per cell in ReGIR\n//\t\t- Each one of these floats would contain a percentage of visibility for the corresponding reservoir index of the cell\n//\t\t- The visibility percentage would be computed by averaging the successful visibility rays traced during shading\n//\t\t\t- The issue is that the reservoirs aren't persistent so any data accumulated will be discarded at the next frame when\n//\t\t\t- the grid is rebuilt\n//\t\t\n//\t\t\t- We would need a prepass at lower resolution, same as for radiance caching?\n//\t\t\t- Maybe we can keep the grids of past frames to help with that?\n// - For the visibility reuse of ReGIR, maybe we can just trace from the center of the cell and if at shading time, the reservoir is 0, we know that this must be because the reservoir is occluded for that sample so we can just draw a canonical candidate instead there\n//\t\t- Always tracing from the center of the cell may be always broken depending on the geometry of the scene so maybe we want to trace from the center of the cell as a default but as path tracing progresses, we want to save one point on the surface of geometry in that cell and use that point to trace shadow rays from onwards, that way we're always tracing from a valid surface in the grid cell\n//\t\t- And with that new \"representative point\" for each cell, we can also have the normal to evaluate the cosine term\n// - For performance, at shading time when resampling the reservoirs, there may be only a few materials that benefit from the BSDF in the resampling target function because lambertian doesn't care, mirrors don't care, specular don't care, really it's only materials at like 0.3 roughness ish\n// - Looking at the average contribution of cells seems to be giving some good metric on the performance of the sampling per cell no? What can we do with that info? 
Adaptive sampling somehow?\n//\t\tMaybe we can adaptively adapt the number of samples per grid cell during grid fill with that\n// - Cull lights that have too low a contribution during grid fill. Maybe some power function or something to keep things unbiased, not just plain reject\n// - NEE++ mix up to help with visibility sampling?\n// - The spatial reuse seems giga compute bound, try to optimize the cell compute functions in Settings.h\n// - Is the grid fill bottleneck by random light sampling? Try on the class white room to see if perf improves\n//\t\tA little bit yeah. Maybe we can do something with light presampling per cell\n// - Shared mem ray tracing helps a ton for ReGIR grid fill & spatial reuse ----> maybe have them in a separate kernel to be able to use max shared mem without destroying the L1 for the rest of the kernels?\n// - Can we add the canonical sample at the end of the spatial pass instead of in the shading pass?\n// - The idea to fix the bad ReGIR target function that may prioritze occluded samples is to use NEE with a visibility weight\n// - Maybe we can just swap the buffers for ReGIR staging buffers instead of copying\n// - Can we use ReSTIR DI and fill the ReGIR grid with the ReSTIR DI samples? 
---> Doesn't work at later bounces though\n// - Can we start another grid fill in parallel of the mega kernel after the spatial reuse such that we overlap some work and don't have to do the grid fill at the next frame\n//\t\t- We can even decouple the spatial reuse with the visibility pass of it and launch the grid fill during the visibility pass of teh spatial reuse\n// - Introduce envmap sampling into ReGIR to avoid having to integrate the envmap in a separate domain: big perf boost\n// - When shading, maybe pick random reservoirs from a single neighboring cell to reduce shadow rays count but do that on a per warp basis to reduce the size of artifacts (which would be grid cell size otherwise)\n// - Is there something to do with a wavefront architecture when tracing shadow rays at the end of the spatial reuse or something? Do we want maybe to dispatch kernels together for tracing from a given cell?\n// - Maybe we can do some double buffering on the grid to be able to spatially reuse WHILE generating the gri fill: we would run the grid fill and fill grid 1 while spatially reusing on grid 2 which was filled last frame\n//\t\tThe hope being that the computations can overlap a bit with the ray traversal\n//\t\tWe can just test that tehroretically and see if that helps performance at all\n// - Can we do something with the time per grid cell ray? 
To try and reduce this \"long tails\" effect\n//\t\t- Maybe what we can do here is compact the hard threads together so that we are able to launch all the light rays together and avoid divergence between light and heavy rays\n// - Gather some information of how many light samples are rejected because of visibility to get a feel for how much can be gained with NEE++\n//\t\t- Also incorporate back facing lights info\n\n// TODO restir gi render pass inheriting from megakernel render pass seems to compile mega kernel even though we don't need it\n// - ReSTIR redundant render_data.g_buffer.primary_hit_position[pixel_index] load for both shading_point and view_direction\n// - ReSTIR only load the rest of the reservoir if its UCW isn't 0\n\n\n// TODOs  performance improvements branch:\n// - Remove HIPRT INLINE everywhere\n// - Vertex cache optimization buffer arrangement for better triangle pairing and better tracing performance?\n// - Thread is swizzling (reorder ray invocations) https://github.com/BoyBaykiller/IDKEngine/blob/95a15c1db02f11bd2f47bb81bcfccf0943d3e703/IDKEngine/Resource/Shaders/PathTracing/FirstHit/compute.glsl#L206\n// - Option for terminating rays on emissive hits? --> this is going to be biased but may help performance\n// - Have a look at reweghing fireflies for Monte Carlo instead of Gmon so we can remove fireflies unbiasedly without the darkening\n// - There seems be some scratch store on the RNG state? Try to offload that to shared mem?\n//\t\t- Do that after wavefront because wavefront may solve the issue\n// - also reuse BSDF mis ray of envmap MIS\n// - We do not need the nested dielecttrics stack management in the camera rays kernel\n// - In the material packing, pack major material properties together: coat, metallic, specular_transmission, diffuse_transmission, ... 
so that we can, in a single memory access, determine whether or not we need to read the rest of the coat, specular transmission ,...\n// - If hitting the same material as before, don't load the material from VRAM as it's exactly the same? (only works for non-textured materials)\n// - When doing MIS, if we sampled a BSDF sample on a delta distribution, we shouldn't bother sampling lights because we know that the BSDF sample is going to overweight everything else and the light sample is going to have a MIS weight of 0 anyways\n// - MIS disabled after some number of bounces? not on glass though? MIS disabled after the ray throughput gets below some threshold?\n// - texture compression\n// - store full pointers to textures in materails instead of indirect indices? probably cheaper to have ibigger materials than to havbe to do that indirect fetch?\n// - limit  number of bounces based on material type\n// - use material SoA in GBuffer and only load what's necessary (i.e. not the thin film and all of that if the material isn't using thin-film, ...)\n// - use the fact that some values are already computed in bsdf_sample to pass them to bsdf_eval in a big BSDFStateStructure or something to avoid recomputing\n// - schlick fresnel in many places? instead of correct fresnel. switch in \"performance settings\"\n// \n// ------------------- STILL RELEVANT WITH WAVEFRONT ? -------------------\n// - if we don't have the ray volume state in the GBuffer anymore, we can remove the stack handlign in the trace ray function of the camera rays\n// - merge camera rays and path tracer?\n// - store Material in GBuffer only if using ReSTIR, otherwise, just reconstruct it in the path tracign kernel\n// ------------------- STILL RELEVANT WITH WAVEFRONT ? -------------------\n// \n// ------------------- DO AFTER WAVEFRONT -------------------\n// - maybe have shaders without energy compensation? 
because this do be eating quite a lot of registers\n// - let's do some ray reordering because in complex scenes and complex materials and without hardware RT; this may actually  be quite worth it\n// - dispatch mega kernel when only a few rays are left alive after compaction?\n// - investigate where the big register usage comes from (by commenting lines) --> split shaders there?\n// - split shaders for material specifics and dispatch in parallel?\n// - use wavefront path tracing to evaluate direct  lighting, envmap and BSDF sample in parallel\n// - start shooting camera rays for frame N+1 during frame N?\n// - compaction - https://github.com/microsoft/directxshadercompiler/wiki/wave-intrinsics#example\n// - launch bounds optimization?\n// - thread group size optimization?\n// - double buffering of frames in general to better keep the GPU occupied?\n// - can we gain in performance by having the trace rays functions in completely separate passes so that we can have the maximum amount of L1 cache in the passes that now don't trace rays? (and use max amount of shared mem in the rays only passes)\n// ------------------- DO AFTER WAVEFRONT -------------------\n\n\n// TODO known bugs / incorrectness:\n// - take transmission color into account when direct sampling a light source that is inside a volume: leave that for when implementing proper volumes?\n// - denoiser AOVs not accounting for transmission correctly since Disney  BSDF\n//\t  - same with perfect reflection\n// - threadmanager: what if we start a thread with a dependency A on a thread that itself has a dependency B? 
we're going to try join dependency A even if thread with dependency on B hasn't even started yet --> joining nothing --> immediate return --> should have waited for the dependency but hasn't\n// - Thin-film interference energy conservation/preservation is broken with \"strong BSDF energy conservation\" --> too bright (with transmission at 1.0f), even with film thickness == 0.0f\n// - When overriding the base color for example in the global material overrider, if we then uncheck the base color override to stop overriding the base color, it returns the material to its very default base color  (the one  read from the scene file) instead of  returning it to what the user may have modified up to that point\n// - Probably some weirdness with how light sampling is handled while inside a dielectric: inside_surface_multiplier? cosine term < 0 check? there shouldn't be any of that basically, it should just be evaluating the BSDF\n// - Emissive chminey texture broken in scandinavian-studio\n// - For any material that is perfectly specular / perfectly transparent (the issue is most appearant with mirrors or IOR 1 glass), seeing the envmap through this object takes the envmap intensity scaling into account and so the envmap through the object is much brighter than the main background (when camera rays miss the scene and hit the envmap directly) without background envmap intensity scaling: https://mega.nz/file/x8I12Q6b#DJ2ZobBav9rwFdtvTX-CmgA1eFEgKprjXSvOg0My38o\n// - White furnace mode not turning emissives off in the cornell_pbr with ReSTIR GI?\n\n// TODO Features:\n// - RISLTC: https://data.ishaanshah.xyz/research/pdfs/risltc.pdf. 
Some explanations in there for projected solid angle and LTC sampling\n// - Inciteful graph to explore (started with Practical product sampling warping NVIDIA): https://inciteful.xyz/p?ids%5B%5D=W4220995884&ids%5B%5D=W3179788358&ids%5B%5D=W4403641440&ids%5B%5D=W4390345185&ids%5B%5D=W4388994411&ids%5B%5D=W4200187284&ids%5B%5D=W2885975589&ids%5B%5D=W3183450244&ids%5B%5D=W1893031899&ids%5B%5D=W3036883119&ids%5B%5D=W3044759327&ids%5B%5D=W4240396283&ids%5B%5D=W3110265079&ids%5B%5D=W2073976119&ids%5B%5D=W2988541899&ids%5B%5D=W2885239691&ids%5B%5D=W2964425571&ids%5B%5D=W2030242873&ids%5B%5D=W3044185278\n// - VisibilityCluster: Average Directional Visibility for Many-Light Rendering: https://ieeexplore.ieee.org/document/6464264\n// - Practical product sampling warping NVIDIA, there's a shadertoy for that\n// - Sample specular/diffuse lobe with the luminance of the diffuse lobe\n// - Sample specular/diffuse by taking the thrioughput of the path into account?\n// - Sample by evaluating the contribution of both samples and choosing proportional to the contribution:\n//\t\t- a next \"clever way\" would be to generate L with both diffuse and specular but using the sample random number, then compare their total \"contributions\" (whole specular+diffuse BRDF value divided by pdf of generator and multiplied by path prefix throughput), then depending on the luma of that you choose either the first or second sample.\n//\t\t- so you're making decisions about what branch you take posteriori not a-priori.\n//\n//\t\t- This has a few drawbacks :\n//\t\t- you're computing the contribution twice (but you're not really doing double the work for generation because Low Discrepancy Sequences / random numbers->maximum divergence)\n//\t\t- if you need the PDF for MIS or RIS for a given L you need to do far more work, your sampling routines must be invertible\n// \n//\t\t- The latter part is annoying because many BRDF sampling routines don't require you to find the xi which produce a given L 
when you want to query pdf(L).\n//\t\t- However the issue is that for every 2D value of xi you have two values of L between which you've chosen based on the value of the whole BRDF, so to know the PDF of any of the L in the pair, you need to know exactly what the other L is.\n//\n//\t\tThis is why this method is not tractable / fun for more than 2 BRDFs.\n// \n// - Vector valued monte carlo: https://suikasibyl.github.io/files/vvmc/paper.pdf\n// - Reweighting path guiding: https://zhiminfan.work/paper/mi_reweight_preprint.pdf\n// - Fixed balance heuristic: https://qingqin-hua.com/publication/2025-correct-balance/2025-correct-balance.pdf\n// - Envmap with visibility sampling: https://static.chaos.com/documents/assets/000/000/377/original/adaptive_dome_abstract.pdf?1676455588\n// - Faster PNG loading: https://github.com/richgel999/fpng\n// - Need something blocking inn \"start thread with dependency\" so that the main thread is blocked until the other thread actually started. This should solve the issue where sometilmes the main threds just joins everyone but everyone hasn't even started yet\n// - Can we have something like sharc but for light sampling? 
We store reservoirs in the hash table and resample everytime we read into the hash grid with some initial candidates?\n//\t\t- And maybe we can spatial reuse on that\n//\t\t- Issue with MIS weights though because the MIS weights here are going to be an integral over the scene surface of the grid cell\n//\t\t\t- Maybe SMIS and MMIS have something to say about that\n// - Stochastic light culling: https://jcgt.org/published/0005/01/02/paper-lowres.pdf\n// - Disney adaptive sampling: https://la.disneyresearch.com/wp-content/uploads/Adaptive-Rendering-with-Linear-Predictions-Paper.pdf?utm_source=chatgpt.com\n// - flush to zero denormal float numbers compiler option?\n//\t\t// -fcuda-flush-denormals-to-zero\n//\t\t// -fgpu-flush-denormals-to-zero\n// - Use a CPP preprocessor lib to preprocess shaders and see if some macro is used or not\n//\t\t- Also uses a dead code removal library such that we only have relevant code in the shader and we can know for sure which macros are used or not\n// - Eta scaling for russian roulette refractions\n// - Better adaptive sampling error metrics: https://theses.hal.science/tel-03675200v1/document, section 10.1.1, Heitz et al 2018 + Rigau et al 2003\n// - Projected solid angle light sampling https://momentsingraphics.de/ToyRenderer4RayTracing.html\n// - Disable back facing lights for performance because most of those lights, for correct meshes, are going to be occluded\n//\t\t- Add an option to re-enable manually back facing lights in the material\n// - Efficient Image-Space Shape Splatting for Monte Carlo Rendering\n// - DRMLT: https://joeylitalien.github.io/assets/drmlt/drmlt.pdf\n// - What's NEE-AT of RTXPT?\n// - Area ReSTIR just for the antialiasing part\n// - Directional albedo sampling weights for the principled BSDF importance sampling. 
Also, can we do \"perfect importance\" sampling where we sample each relevant lobe, evaluate them (because we have to evaluate them anyways in eval()) and choose which one is sampled proportionally to its contribution or is it exactly the idea of sampling based on directional albedo?\n// - Russian roulette improvements: http://wscg.zcu.cz/wscg2003/Papers_2003/C29.pdf\n// - Some MIS weights ideas in: https://momentsingraphics.de/ToyRenderer4RayTracing.html in \"Combining diffuse and specular\"\n// - Radiance caching for feeding russian roulette\n// - Tokuyoshi (2023), Efficient Spatial Resampling Using the PDF Similarity\n//\t\t- Not for offline?\n// - Some automatic metric to determine automatically what GMoN blend factor to use\n// - software opacity micromaps\n// - Add parameters to increase the strength of specular / coat darkening\n// - sample BSDF diffuse lobe proba based on its luminance?\n// - how to help with shaders combination compilation times?\n//\t\tRocFFT has some ideas for parallel compilation https://github.com/ROCm/rocFFT/blob/e9303acfb993de98b78358f3bf6fdd93f810f5fd/docs/design/runtime_compilation.rst#parallel-compilation\n//\t\t- wavefront path tracing should help\n//\t\t- Maybe have two sets of shaders:\n//\t\t\t- One that uses the #if for performance\n//\t\t\t- One that uses if() everywhere instead of #if for fast preview\n//\t\t\t\t- to accelerate compilation times: we can use if() everywhere in the code so that switching an option doesn't require a compilation but if we want, we can then apply the options currently selected and compiler everything for maximum performance. This can probably be done with a massive shader that has all the options using if() instead of #if ? Maybe some better alternative though?\n//\t\t\t\t----------- That's a good one too ^\n// - next event estimation++? --> 2023 paper improvement with the octree\n// - ideas of https://pbr-book.org/4ed/Light_Sources/Further_Reading for performance\n// - envmap visibility cache? 
\n// - If GMoN is enabled, it would be cool to be able to denoise the GMoN blend between GMoN and the default framebuffer but currently the denoiser only denoises the full GMoN and nothing else\n// - Exploiting Visibility Correlation in Direct Illumination\n// - smarter shader cache (hints to avoid using all kernel options when compiling a kernel? We know that Camera ray doesn't care about direct lighting strategy for example)\n// - for LTC sheen lobe, have the option to use either SGGX volumetric sheen or approximation precomputed LTC data\n// - for volumes, we don't have to use the same phase function at each bounce, for artistic control of the \"blur shape\"\n// - --help on the commandline\n// - Normal mapping seems broken again, light rays going under the surface... p1 env light\n// - performance/bias tradeoff by ignoring alpha tests (either for global rays or only shadow rays) after N bounce?\n// - performance/bias tradeoff by ignoring direct lighting occlusion after N bounce? --> strong bias but maybe something to do by reducing the length of shadow rays instead of just hard-disabling occlusion\n// - energy conserving Oren Nayar: https://mimosa-pudica.net/improved-oren-nayar.html#images\n// - experiment with a feature that ignores really dark pixel in the variance estimation of the adaptive \n//\t\tsampling because it seems that very dark areas in the image are always flagged as very \n//\t\tnoisy / very high variance and they take a very long time to converge (always red on the heatmap) \n//\t\teven though they are very dark regions and we don't even noise in them. If our eyes can't see \n//\t\tthe noise, why bother? Same with very bright regions\n// - Reuse miss BSDF ray on the last bounce to sample envmap with MIS\n// - We're using an approximation of the clearcoated BSDF directional albedo for energy compensation right now. The approximation breaks down when what's below the coat is 0.0f roughness. 
We could potentially bake the directional albedo for a mirror-coated BSDF and interpolate between that mirror-coated LUT and the typical rough-coated BSDF LUT based on the roughness of what's below the coat. This mirror-coated LUT doesn't work very well if there's a smooth-dielectric-coated lambert below the coat so maybe we would need a third LUT for that case\n// - For/switch paradigm for instruction cache misses? https://youtu.be/lxRgmZTEBHM?si=FcaEYqAMVO_QyfwX&t=3061 \n//\t\t- kind of need a good way to profile that to see the difference though\n// - have a light BVH for intersecting light triangles only: useful when we want to know whether or not a direction could have be sampled by the light sampler: we don't need to intersect the whole scene BVH, just the light geometry, less expensive ------> we're going to need another shadow ray though because if we're intersecting solely against the light BVH we don't have the rest of the geometry of the scene to occluded the lights. So we're going to need a shadow ray in case we do hit a light in the light BVH to make sure that light isn't occluded ----> Maybe collect statistics on how many BSDF rays light sample miss lights: this can help see what's going to be the benefit of a light BVH because the drawback of a light BVH is going to be only if we hit a light because then we need another BVH traversal to check for occlusion\n// - shadow terminator issue on sphere low smooth scene: [Taming the Shadow Terminator], Matt Jen-Yuan Chiang, https://github.com/aconty/aconty/blob/main/pdf/bump-terminator-nvidia2019.pdf\n// - use HIP/CUDA graphs to reduce launch overhead\n// - linear interpolation (spatial, object space, world space) function for the parameters of the BSDF\n// - compensated importance sampling of envmap\n// - Product importance sampling envmap: https://github.com/aconty/aconty/blob/main/pdf/fast-product-importance-abstract.pdf\n// - multiple GLTF, one GLB for different point of views per model\n// - CTRL + 
mouse wheel for zoom in viewport, CTRL click reset zoom\n// - clay render\n// - build BVHs one by one to avoid big memory spike? but what about BLAS performance cost?\n// - play with SBVH building parameters alpha/beta for memory/performance tradeoff + ImGui for that\n// - ability to change the color of the heatmap shader in ImGui\n// - do not store alpha from envmap\n// - fixed point 18b RGB for envmap? 70% size reduction compared to full size. Can't use texture sampler though. Is not using a sampler ok performance-wise? --> it probably is since we're probably memory lantency bound, not memory bandwidth\n// - look at blender cycles \"medium contrast\", \"medium low constract\", \"medium high\", ... --> filmic tonemapper does it?\n// - normal mapping strength\n// - blackbody light emitters\n// - ACES mapping --> filmic tonemapper may be more comprehensive\n// - better post processing: contrast, low, medium, high exposure curve --> filmic tonemapper\n// - bloom post processing\n// - BRDF swapper ImGui : Disney, Lambertian, Oren Nayar, Cook Torrance, Perfect fresnel dielectric reflect/transmit\n// - choose principled BSDF diffuse model (disney, lambertian, oren nayar)\n// - portal envmap sampling --> choose portals with ImGui\n// - find a way to not fill the texcoords buffer for meshes that don't have textures\n// - pack CPUMaterial informations such as texture indices (we can probably use 16 bit for a texture index --> 2 texture indices in one 32 bit register)\n// - use 8 bit textures for material properties instead of float\n// - use fixed point 8 bit for materials parameters in [0, 1], should be good enough\n// - log size of buffers used: vertices, indices, normals, ...\n// - log memory size of buffers used: vertices, indices, normals, ...\n// - able / disable normal mapping\n// - use only one channel for material property texture to save VRAM\n// - Remove vertex normals for meshes that have normal maps and save VRAM\n// - texture compression\n// - WUFFS for image 
loading?\n// - float compression for render buffers?\n// - Exporter (just serialize the scene to binary file and have a look at how to do backward compatibility)\n// - Allow material parameters textures manipulation with ImGui\n// - Disable material parameters in ImGui that have a texture associated (since the ImGui slider in this case has no effect)\n// - Upload grayscale texture (roughness, specular and other BSDF parameters basically) as one channel to the GPU instead of memory costly RGBA\n// - Emissive textures sampling: how to sample an object that has an emissive texture? How to know which triangles of the mesh are covered by the emissive parts of the texture?\n// - stream compaction / active thread compaction (ingo wald 2011)\n// - sample regeneration\n// - Spectral rendering / look at gemstone rendering because they quite a lot of interesting lighting effect to take into account (pleochroism, birefringent, dispersion, ...)\n// - structure of arrays instead of arrays of struct relevant for global buffers in terms of performance?\n// - data packing in buffer --> use one 32 bit buffer to store multiple information if not using all 32 bits\n//\t\t- pack active pixel in same buffer as pixel sample count\n// - pack two texture indices in one int for register saving, 65536 (16 bit per index when packed) textures is enough\n// - hint shadow rays for better traversal perf on RDNA3?\n// - benchmarker to measure frame times precisely (avg, std dev, ...) 
+ fixed random seed for reproducible results\n// - alias table for sampling env map instead of log(n) binary search\n// - image comparator slider (to have adaptive sampling view + default view on the same viewport for example)\n// - thin materials\n// - Have the UI run at its own framerate to avoid having the UI come to a crawl when the path tracing is expensive\n// - When modifying the emission of a material with the material editor, it should be reflected in the scene and allow the direct sampling of the geometry so the emissive triangles buffer should be updated\n// - Ray differentials for texture mipampping (better bandwidth utilization since sampling potentially smaller texture --> fit better in cache)\n// - Visualizing ray depth (only 1 frame otherwise it would flicker a lot [or choose the option to have it flicker] )\n// - Visualizing pixel time with the clock() instruction. Pixel heatmap:\n//\t\t- https://developer.nvidia.com/blog/profiling-dxr-shaders-with-timer-instrumentation/\n//\t\t- https://github.com/libigl/libigl/issues/1388\n//\t\t- https://github.com/libigl/libigl/issues/1534\n// - Visualizing russian roulette depth termination\n// - Statistics on russian roulette efficiency\n// - feature to disable ReSTIR after a certain percentage of convergence --> we don't want to pay the full price of resampling and everything only for a few difficult isolated pixels (especially true with adaptive sampling where neighbors don't get sampled --> no new samples added to their reservoir --> no need to resample)\n// - Realistic Camera Model\n// - Focus blur\n// - Flakes BRDF (maybe look at OSPRay implementation for a reference ?)\n// - ImGuizmo for moving objects in the scene\n// - choose denoiser quality in imgui\n// - try async buffer copy for the denoiser (maybe run a kernel to generate normals and another to generate albedo buffer before the path tracing kernel to be able to async copy while the path tracing kernel is running?)\n// - write scene details to 
imgui (nb vertices, triangles, ...)\n// - choose env map at runtime imgui\n// - choose scene file at runtime imgui\n// - lock camera checkbox to avoid messing up when big render in progress\n// - PBRT v3 scene parser\n// - implement ideas of https://blog.selfshadow.com/publications/s2017-shading-course/imageworks/s2017_pbs_imageworks_slides_v2.pdf\n// - Efficiency Aware Russian roulette and splitting\n// - ReSTIR PT\n\nvoid glfw_window_resized_callback(GLFWwindow* window, int width, int height)\n{\n\tint new_width_pixels, new_height_pixels;\n\tglfwGetFramebufferSize(window, &new_width_pixels, &new_height_pixels);\n\n\tif (new_width_pixels == 0 || new_height_pixels == 0)\n\t\t// This probably means that the application has been minimized, we're not doing anything then\n\t\treturn;\n\telse\n\t{\n\t\t// We've stored a pointer to the RenderWindow in the \"WindowUserPointer\" of glfw\n\t\tRenderWindow* render_window = reinterpret_cast<RenderWindow*>(glfwGetWindowUserPointer(window));\n\t\trender_window->resize(width, height);\n\t}\n}\n\n// Implementation from https://learnopengl.com/In-Practice/Debugging\nvoid APIENTRY RenderWindow::gl_debug_output_callback(GLenum source,\n\tGLenum type,\n\tGLuint id,\n\tGLenum severity,\n\tGLsizei length,\n\tconst GLchar* message,\n\tconst void* userParam)\n{\n\t// ignore non-significant error/warning codes\n\tif (id == 131169 || id == 131185 || id == 131218 || id == 131204) \n\t\treturn;\n\n\tif (id == 131154)\n\t\t// NVIDIA specific warning\n\t\t// Pixel-path performance warning: Pixel transfer is synchronized with 3D rendering.\n\t\t// \n\t\t// Mainly happens when we take a screenshot\n\t\treturn;\n\n\tif (id == 131154)\n\t\t// NVIDIA specific warning\n\t\t// Pixel-path performance warning: Pixel transfer is synchronized with 3D rendering.\n\t\t// \n\t\t// Mainly happens when we take a screenshot\n\t\treturn;\n\n\tstd::string source_str;\n\tstd::string type_str;\n\tstd::string severity_str;\n\n\tswitch (source)\n\t{\n\tcase 
GL_DEBUG_SOURCE_API:             source_str = \"Source: API\"; break;\n\tcase GL_DEBUG_SOURCE_WINDOW_SYSTEM:   source_str = \"Source: Window System\"; break;\n\tcase GL_DEBUG_SOURCE_SHADER_COMPILER: source_str = \"Source: Shader Compiler\"; break;\n\tcase GL_DEBUG_SOURCE_THIRD_PARTY:     source_str = \"Source: Third Party\"; break;\n\tcase GL_DEBUG_SOURCE_APPLICATION:     source_str = \"Source: Application\"; break;\n\tcase GL_DEBUG_SOURCE_OTHER:           source_str = \"Source: Other\"; break;\n\t}\n\n\tswitch (type)\n\t{\n\tcase GL_DEBUG_TYPE_ERROR:               type_str = \"Type: Error\"; break;\n\tcase GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR: type_str = \"Type: Deprecated Behaviour\"; break;\n\tcase GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:  type_str = \"Type: Undefined Behaviour\"; break;\n\tcase GL_DEBUG_TYPE_PORTABILITY:         type_str = \"Type: Portability\"; break;\n\tcase GL_DEBUG_TYPE_PERFORMANCE:         type_str = \"Type: Performance\"; break;\n\tcase GL_DEBUG_TYPE_MARKER:              type_str = \"Type: Marker\"; break;\n\tcase GL_DEBUG_TYPE_PUSH_GROUP:          type_str = \"Type: Push Group\"; break;\n\tcase GL_DEBUG_TYPE_POP_GROUP:           type_str = \"Type: Pop Group\"; break;\n\tcase GL_DEBUG_TYPE_OTHER:               type_str = \"Type: Other\"; break;\n\t}\n\n\tswitch (severity)\n\t{\n\tcase GL_DEBUG_SEVERITY_HIGH:         severity_str = \"Severity: high\"; break;\n\tcase GL_DEBUG_SEVERITY_MEDIUM:       severity_str = \"Severity: medium\"; break;\n\tcase GL_DEBUG_SEVERITY_LOW:          severity_str = \"Severity: low\"; break;\n\tcase GL_DEBUG_SEVERITY_NOTIFICATION: severity_str = \"Severity: notification\"; break;\n\t}\n\n\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \n\t\t\"---------------\\n\"\n\t\t\"Debug message (%d): %s\\n\"\n\t\t\"%s\\n%s\\n%s\\n\\n\", id, message, source_str.c_str(), type_str.c_str(), severity_str.c_str());\n\n\t// The following breaks into the debugger to help pinpoint what OpenGL\n\t// call 
errored\n\tUtils::debugbreak();\n}\n\nconst std::string RenderWindow::PERF_METRICS_CPU_OVERHEAD_TIME_KEY = \"CPUDisplayTime\";\n\nRenderWindow::RenderWindow(int renderer_width, int renderer_height, std::shared_ptr<HIPRTOrochiCtx> hiprt_oro_ctx) : m_viewport_width(renderer_width), m_viewport_height(renderer_height)\n{\n\t// Adding the size of the windows around the viewport such that these windows\n\t// have their base size and the viewport has the size the the user has asked for\n\t// (through the commandline)\n\tint window_width = renderer_width + ImGuiSettingsWindow::BASE_SIZE;\n\tint window_height = renderer_height + ImGuiLogWindow::BASE_SIZE;\n\n\tinit_glfw(window_width, window_height);\n\tinit_gl(renderer_width, renderer_height);\n\tImGuiRenderer::init_imgui(m_glfw_window);\n\n\tm_application_state = std::make_shared<ApplicationState>();\n\tm_application_settings = std::make_shared<ApplicationSettings>();\n\tm_renderer = std::make_shared<GPURenderer>(this, hiprt_oro_ctx, m_application_settings);\n\tm_gpu_baker = std::make_shared<GPUBaker>(m_renderer);\n\n\t// Disabling auto samples per frame is accumulation is OFF\n\tm_application_settings->auto_sample_per_frame = m_renderer->get_render_settings().accumulate ? 
m_application_settings->auto_sample_per_frame : false;\n\n\tm_renderer->resize(renderer_width, renderer_height);\n\n\tThreadManager::start_thread(ThreadManager::RENDER_WINDOW_CONSTRUCTOR, [this, renderer_width, renderer_height]() {\n\t\t// m_denoiser->initialize();\n\t\t// m_denoiser = std::make_shared<OpenImageDenoiser>();\n\t\t// m_denoiser->resize(renderer_width, renderer_height);\n\t\t// m_denoiser->set_use_albedo(m_application_settings->denoiser_use_albedo);\n\t\t// m_denoiser->set_use_normals(m_application_settings->denoiser_use_normals);\n\t\t// m_denoiser->finalize();\n\n\t\tm_perf_metrics = std::make_shared<PerformanceMetricsComputer>();\n\n\t\tm_imgui_renderer = std::make_shared<ImGuiRenderer>();\n\t\tm_imgui_renderer->set_render_window(this);\n\n\t\t// Making the render dirty to force a cleanup at startup\n\t\tset_render_dirty(true);\n\t});\n\n\n\t// Cannot create that on a thread since it compiles OpenGL shaders\n\t// which the OpenGL context which is only available to the thread it was created on (the main thread)\n\tm_display_view_system = std::make_shared<DisplayViewSystem>(m_renderer, this);\n\n\t// Same for the screenshoter\n\tm_screenshoter = std::make_shared<Screenshoter>();\n\tm_screenshoter->set_renderer(m_renderer);\n\tm_screenshoter->set_render_window(this);\n}\n\nRenderWindow::~RenderWindow()\n{\n\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Exiting...\");\n\n\t// Hiding the window to show the user that the app has exited. This is basically only useful if the\n\t// wait function call below hangs for a while: we don't want the user to see the application\n\t// frozen in this case. 
Note that we're *hiding* the window and not *destroying* it because\n\t// destroying the window also destroys the GL context which may cause crashes is some\n\t// other part of the app is still using buffers or whatnot\n\tglfwHideWindow(m_glfw_window);\n\n\t// Waiting for all threads that are currently reading from the disk (for compiling kernels in the background)\n\t// to finish the reading to avoid SEGFAULTING\n\tg_gpu_kernel_compiler.wait_compiler_file_operations();\n\n\t// Waiting for the renderer to finish its frame otherwise\n\t// we're probably going to close the window / destroy the\n\t// GL context / etc... while the renderer might still be\n\t// using so OpenGL Interop buffers --> segfault\n\tm_renderer->synchronize_all_kernels();\n\t// Manually destroying the renderer now before we destroy the GL context\n\t// glfwDestroyWindow()\n\tm_renderer = nullptr;\n\t// Same for the screenshoter\n\tm_screenshoter = nullptr;\n\t// Same for the baker\n\tm_gpu_baker = nullptr;\n\t// Same for the display view system\n\tm_display_view_system = nullptr;\n\t// Same for the imgui renderer\n\tm_imgui_renderer = nullptr;\n\n\tImGui_ImplOpenGL3_Shutdown();\n\tImGui_ImplGlfw_Shutdown();\n\tImGui::DestroyContext();\n\n\tglfwDestroyWindow(m_glfw_window);\n}\n\nvoid RenderWindow::init_glfw(int window_width, int window_height)\n{\n\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Initializing GLFW...\");\n\tif (!glfwInit())\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Could not initialize GLFW...\");\n\n\t\tint trash = std::getchar();\n\n\t\tstd::exit(1);\n\t}\n\n\tglfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);\n\tglfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);\n\tglfwWindowHint(GLFW_OPENGL_DEBUG_CONTEXT, true);\n\n#ifdef __unix__         \n\tm_mouse_interactor = std::make_shared<LinuxRenderWindowMouseInteractor>();\n#elif defined(_WIN32) || defined(WIN32) \n\tm_mouse_interactor = 
std::make_shared<WindowsRenderWindowMouseInteractor>();\n#endif\n\tm_keyboard_interactor.set_render_window(this);\n\n\tconst GLFWvidmode* mode = glfwGetVideoMode(glfwGetPrimaryMonitor());\n\n\tm_glfw_window = glfwCreateWindow(window_width, window_height, \"HIPRT-Path-Tracer\", NULL, NULL);\n\tif (!m_glfw_window)\n\t{\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Could not initialize the GLFW window...\");\n\n\t\tint trash = std::getchar();\n\n\t\tstd::exit(1);\n\t}\n\n\tglfwMakeContextCurrent(m_glfw_window);\n\t// Setting a pointer to this instance of RenderWindow inside the m_window GLFWwindow so that\n\t// we can retrieve a pointer to this instance of RenderWindow in the callback functions\n\t// such as the window_resized_callback function for example\n\tglfwSetWindowUserPointer(m_glfw_window, this);\n\tglfwSwapInterval(1);\n\tglfwSetWindowSizeCallback(m_glfw_window, glfw_window_resized_callback);\n\tm_mouse_interactor->set_callbacks(m_glfw_window);\n\tm_keyboard_interactor.set_callbacks(m_glfw_window);\n\t\n\tglewInit();\n\n\tTracyGpuContext;\n\n\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"GLFW Initialized!\");\n}\n\nvoid RenderWindow::init_gl(int width, int height)\n{\n\tglViewport(0, 0, width, height);\n\n\t// Initializing the debug output of OpenGL to catch errors\n\t// when calling OpenGL function with an incorrect OpenGL state\n\tint flags;\n\tglGetIntegerv(GL_CONTEXT_FLAGS, &flags);\n\tif (flags & GL_CONTEXT_FLAG_DEBUG_BIT)\n\t{\n\t\tglEnable(GL_DEBUG_OUTPUT);\n\t\tglEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);\n\t\tglDebugMessageCallback(RenderWindow::gl_debug_output_callback, nullptr);\n\t\tglDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, nullptr, GL_TRUE);\n\t}\n}\n\nvoid RenderWindow::resize(int pixels_width, int pixels_height)\n{\n\tif (pixels_width == m_viewport_width && pixels_height == m_viewport_height)\n\t{\n\t\t// Already the right size, nothing to do. 
This can happen\n\t\t// when the window comes out of the minized state. Getting\n\t\t// in the minimized state triggers a queue_resize event with a new size\n\t\t// of (0, 0) and getting out of the minimized state triggers a queue_resize\n\t\t// event with a size equal to the one before the minimization, which means\n\t\t// that the window wasn't actually resized and there is nothing to do\n\n\t\treturn;\n\t}\n\n\tglViewport(0, 0, pixels_width, pixels_height);\n\n\tm_viewport_width = pixels_width;\n\tm_viewport_height = pixels_height;\n\n\t// Taking resolution scaling into account\n\tfloat& resolution_scale = m_application_settings->render_resolution_scale;\n\tif (m_application_settings->keep_same_resolution)\n\t\t// TODO what about the height changing ?\n\t\tresolution_scale = m_application_settings->target_width / static_cast<float>(pixels_width);\n\n\tint new_render_width = std::floor(pixels_width * resolution_scale);\n\tint new_render_height = std::floor(pixels_height * resolution_scale);\n\n\tif (new_render_height == 0 || new_render_width == 0)\n\t\t// Can happen if resizing the window to a 1 pixel width/height while having a resolution scaling < 1. 
\n\t\t// Integer maths will round it down to 0\n\t\treturn;\n\t\n\tm_renderer->resize(new_render_width, new_render_height);\n\tm_denoiser->resize(new_render_width, new_render_height);\n\tm_denoiser->finalize();\n\n\tm_display_view_system->resize(new_render_width, new_render_height);\n\n\tset_render_dirty(true);\n}\n\nvoid RenderWindow::change_resolution_scaling(float new_scaling)\n{\n\tfloat new_render_width = std::floor(m_viewport_width * new_scaling);\n\tfloat new_render_height = std::floor(m_viewport_height * new_scaling);\n\n\tm_renderer->resize(new_render_width, new_render_height);\n\tm_denoiser->resize(new_render_width, new_render_height);\n\tm_denoiser->finalize();\n\tm_display_view_system->resize(new_render_width, new_render_height);\n}\n\nint RenderWindow::get_width()\n{\n\treturn m_viewport_width;\n}\n\nint RenderWindow::get_height()\n{\n\treturn m_viewport_height;\n}\n\nbool RenderWindow::is_interacting()\n{\n\treturn m_mouse_interactor->is_interacting() || m_keyboard_interactor.is_interacting();\n}\n\nRenderWindowKeyboardInteractor& RenderWindow::get_keyboard_interactor()\n{\n\treturn m_keyboard_interactor;\n}\n\nstd::shared_ptr<RenderWindowMouseInteractor> RenderWindow::get_mouse_interactor()\n{\n\treturn m_mouse_interactor;\n}\n\nstd::shared_ptr<ApplicationSettings> RenderWindow::get_application_settings()\n{\n\treturn m_application_settings;\n}\n\nstd::shared_ptr<DisplayViewSystem> RenderWindow::get_display_view_system()\n{\n\treturn m_display_view_system;\n}\n\nvoid RenderWindow::update_renderer_view_translation(float translation_x, float translation_y, bool scale_translation)\n{\n\tif (scale_translation)\n\t{\n\t\ttranslation_x *= m_application_state->last_CPU_frame_delta_time_ms / 1000.0f;\n\t\ttranslation_y *= m_application_state->last_CPU_frame_delta_time_ms / 1000.0f;\n\n\t\ttranslation_x *= m_renderer->get_camera().camera_movement_speed * m_renderer->get_camera().user_movement_speed_multiplier;\n\t\ttranslation_y *= 
m_renderer->get_camera().camera_movement_speed * m_renderer->get_camera().user_movement_speed_multiplier;\n\t}\n\n\tif (translation_x == 0.0f && translation_y == 0.0f)\n\t\treturn;\n\n\tset_render_dirty(true);\n\n\tglm::vec3 translation = glm::vec3(translation_x, translation_y, 0.0f);\n\tm_renderer->translate_camera_view(translation);\n}\n\nvoid RenderWindow::update_renderer_view_rotation(float offset_x, float offset_y)\n{\n\tset_render_dirty(true);\n\n\tfloat rotation_x, rotation_y;\n\n\trotation_x = offset_x / m_viewport_width * M_TWO_PI / m_application_settings->view_rotation_sldwn_x;\n\trotation_y = offset_y / m_viewport_height * M_TWO_PI / m_application_settings->view_rotation_sldwn_y;\n\n\t// Inverting X and Y here because moving your mouse to the right actually means\n\t// rotating the camera around the Y axis\n\tm_renderer->rotate_camera_view(glm::vec3(rotation_y, rotation_x, 0.0f));\n}\n\nvoid RenderWindow::update_renderer_view_zoom(float offset, bool scale_delta_time)\n{\n\tif (scale_delta_time)\n\t\toffset *= m_application_state->last_CPU_frame_delta_time_ms / 1000.0f;\n\toffset *= m_renderer->get_camera().camera_movement_speed * m_renderer->get_camera().user_movement_speed_multiplier;\n\n\tif (offset == 0.0f)\n\t\treturn;\n\n\tset_render_dirty(true);\n\n\tm_renderer->zoom_camera_view(offset);\n}\n\nbool RenderWindow::is_rendering_done()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tbool rendering_done = false;\n\n\t// No more active pixels (in the case of adaptive sampling for example)\n\trendering_done |= !m_renderer->get_status_buffer_values().one_ray_active;\n\n\t// All pixels have converged to the noise threshold given\n\tfloat proportion_converged;\n\tproportion_converged = m_renderer->get_status_buffer_values().pixel_converged_count / static_cast<float>(m_renderer->m_render_resolution.x * m_renderer->m_render_resolution.y);\n\tproportion_converged *= 100.0f; // To percentage as used in the ImGui 
interface\n\n\t// We're allowed to stop the render after the given proportion of pixels of the image converged if we're actually\n\t// using the pixel stop noise threshold feature (enabled + threshold > 0.0f) or if we're using the\n\t// stop noise threshold but only for the proportion stopping condition (we're not using the threshold of the pixel\n\t// stop noise threshold feature) --> (enabled & adaptive sampling enabled)\n\tbool use_proportion_stopping_condition = (render_settings.stop_pixel_noise_threshold > 0.0f && render_settings.use_pixel_stop_noise_threshold)\n\t\t|| (render_settings.use_pixel_stop_noise_threshold && render_settings.enable_adaptive_sampling);\n\tbool minimum_sample_count_reached = render_settings.sample_number >= m_application_settings->pixel_stop_noise_threshold_min_sample_count || render_settings.enable_adaptive_sampling;\n\trendering_done |= proportion_converged > render_settings.stop_pixel_percentage_converged && use_proportion_stopping_condition && minimum_sample_count_reached;\n\n\t// Max sample count\n\trendering_done |= (m_application_settings->max_sample_count != 0 && render_settings.sample_number + 1 > m_application_settings->max_sample_count);\n\n\t// Max render time\n\tfloat render_time_ms = m_application_state->current_render_time_ms / 1000.0f;\n\trendering_done |= (m_application_settings->max_render_time != 0.0f && render_time_ms >= m_application_settings->max_render_time);\n\n\t// If we are at 0 samples, this means that the render got reset and so\n\t// the render is not done\n\trendering_done &= render_settings.sample_number > 0;\n\n\tif (rendering_done)\n\t\tset_ImGui_status_text(\"Finished!\");\n\telse\n\t{\n\t\tif (m_imgui_renderer->get_status_text() == \"Finished!\" || m_imgui_renderer->get_status_text() == \"\")\n\t\t\tclear_ImGui_status_text();\n\t}\n\n\treturn rendering_done;\n}\n\nbool RenderWindow::needs_viewport_refresh()\n{\n\t// Update every X seconds\n\tbool enough_time_has_passed = 
get_time_ms_before_viewport_refresh() <= 0.0f;\n\t// The render was reset and one frame has been rendered\n\tbool render_was_reset = m_application_state->frame_number == 1;\n\t// We always need to update the viewport if real-time rendering\n\tbool realtime_rendering = !m_renderer->get_render_settings().accumulate;\n\tbool force_refresh = m_application_state->force_viewport_refresh;\n\n\tbool needs_refresh = enough_time_has_passed || realtime_rendering || render_was_reset || force_refresh;\n\tif (!needs_refresh)\n\t\treturn false;\n\n\tif (m_renderer->get_gmon_render_pass()->is_render_pass_used())\n\t{\n\t\t// With GMoN however, we want to recompute the GMoN framebuffer with the new samples accumulated so far\n\t\t// before refreshing the viewport\n\n\t\tif (!needs_refresh)\n\t\t\t// No need of \n\t\t\treturn false;\n\n\t\tif (m_renderer->get_gmon_render_pass()->recomputation_completed())\n\t\t\t// We requested a GMoN recomputation before and it is actually complete, we're ready to display\n\t\t\treturn true;\n\t\telse\n\t\t{\n\t\t\t// So if we need a refresh, we're going to request a GMoN computation first\n\t\t\tm_renderer->get_gmon_render_pass()->request_recomputation();\n\n\t\t\treturn false;\n\t\t}\n\t}\n\telse\n\t\t// Not using GMoN\n\t\treturn needs_refresh;\n}\n\nfloat RenderWindow::get_viewport_refresh_delay_ms()\n{\n\tif (m_application_state->current_render_time_ms < 1000.0f)\n\t\t// Always update if less than a second of render time\n\t\treturn 0.0f;\n\telse if (m_application_state->current_render_time_ms > 1000.0f && m_application_state->current_render_time_ms < 5000.0f)\n\t\t// 1s update in between 1s and 5s of total render time\n\t\treturn 1000.0f;\n\telse\n\t\t// Update every 5s otherwise\n\t\treturn 5000.0f;\n}\n\nfloat RenderWindow::get_time_ms_before_viewport_refresh()\n{\n\tfloat time_since_last_refresh = (glfwGetTimerValue() - m_application_state->last_viewport_refresh_timestamp) / static_cast<float>(glfwGetTimerFrequency()) * 1000.0f;\n\treturn 
get_viewport_refresh_delay_ms() - time_since_last_refresh;\n}\n\nvoid RenderWindow::reset_render()\n{\n\tm_application_settings->last_denoised_sample_count = -1;\n\n\tm_application_state->current_render_time_ms = 0.0f;\n\tm_application_state->render_dirty = false;\n\tm_application_state->frame_number = 0;\n\n\tm_renderer->reset(is_interacting() || m_application_state->interacting_last_frame);\n}\n\nvoid RenderWindow::set_render_dirty(bool render_dirty)\n{\n\tm_application_state->render_dirty = render_dirty;\n}\n\nvoid RenderWindow::set_force_viewport_refresh(bool force_viewport_refresh)\n{\n\tm_application_state->force_viewport_refresh = force_viewport_refresh;\n}\n\nvoid RenderWindow::set_ImGui_status_text(const std::string& status_text)\n{\n\tif (status_text == \"\")\n\t\t// Do not call RenderWindow::set_ImGui_status_text with an empty text.\n\t\t//\n\t\t// To clear the status text, call clear_status_text()\n\t\tUtils::debugbreak();\n\tm_imgui_renderer->set_status_text(status_text);\n}\n\nvoid RenderWindow::clear_ImGui_status_text()\n{\n\tset_ImGui_status_text(\"Rendering...\");\n}\n\nfloat RenderWindow::get_current_render_time()\n{\n\treturn m_application_state->current_render_time_ms;\n}\n\nfloat RenderWindow::get_samples_per_second()\n{\n\treturn m_application_state->samples_per_second;\n}\n\nfloat RenderWindow::compute_samples_per_second()\n{\n\tfloat samples_per_frame = m_renderer->get_render_settings().do_render_low_resolution() ? 
1.0f : m_renderer->get_render_settings().samples_per_frame;\n\n\t// Frame time divided by the number of samples per frame\n\t// 1 sample per frame assumed if rendering at low resolution\n\tif (m_application_state->last_GPU_submit_time > 0)\n\t{\n\t\tuint64_t current_time = glfwGetTimerValue();\n\t\tfloat difference_ms = (current_time - m_application_state->last_GPU_submit_time) / static_cast<float>(glfwGetTimerFrequency()) * 1000.0f;\n\n\t\treturn 1000.0f / (difference_ms / samples_per_frame);\n\t}\n\telse\n\t\treturn 0.0f;\n}\n\nfloat RenderWindow::compute_GPU_stall_duration()\n{\n\tif (m_application_settings->GPU_stall_percentage > 0.0f)\n\t{\n\t\tfloat last_frame_time = m_renderer->get_last_frame_time();\n\t\tfloat stall_duration = last_frame_time * (1.0f / (1.0f - m_application_settings->GPU_stall_percentage / 100.0f)) - last_frame_time;\n\n\t\treturn stall_duration;\n\t}\n\n\treturn 0.0f;\n}\n\nfloat RenderWindow::get_UI_delta_time()\n{\n\treturn m_application_state->last_CPU_frame_delta_time_ms;\n}\n\nstd::shared_ptr<OpenImageDenoiser> RenderWindow::get_denoiser()\n{\n\treturn m_denoiser;\n}\n\nstd::shared_ptr<GPURenderer> RenderWindow::get_renderer()\n{\n\treturn m_renderer;\n}\n\nstd::shared_ptr<GPUBaker> RenderWindow::get_baker()\n{\n\treturn m_gpu_baker;\n}\n\nstd::shared_ptr<PerformanceMetricsComputer> RenderWindow::get_performance_metrics()\n{\n\treturn m_perf_metrics;\n}\n\nstd::shared_ptr<Screenshoter> RenderWindow::get_screenshoter()\n{\n\treturn m_screenshoter;\n}\n\nstd::shared_ptr<ImGuiRenderer> RenderWindow::get_imgui_renderer()\n{\n\treturn m_imgui_renderer;\n}\n\nvoid RenderWindow::run()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\tuint64_t timer_frequency = glfwGetTimerFrequency();\n\n\tm_renderer->start_render_thread();\n\n\twhile (!glfwWindowShouldClose(m_glfw_window))\n\t{\n\t\tuint64_t frame_start_time = glfwGetTimerValue();\n\t\t// Saving whether the renderer as finished its frame\n\t\t// at the 
beginning of this CPU frame. \n\t\t// \n\t\t// If yes, we will use this variable later to record the\n\t\t// whole CPU overhead of launching a new frame + updating the UI\n\t\t// (swapBuffers etc...)\n\t\t//\n\t\t// This is simply done by computing the delta time between\n\t\t// 'frame_start_time' and 'frame_stop_time'. And because the renderer\n\t\t// is done with its frame, a new GPU frame is going to be queued in between\n\t\t// this two timer points so our CPU overhead counter will also take into account\n\t\t// the time taken for launching a new frame so that's perfect\n\t\tbool frame_render_done = m_renderer->frame_render_done();\n\n\t\tglfwPollEvents();\n\t\tglClear(GL_COLOR_BUFFER_BIT);\n\n\t\tm_application_state->render_dirty |= is_interacting();\n\t\tm_application_state->render_dirty |= m_application_state->interacting_last_frame != is_interacting();\n\n\t\trender();\n\t\tm_display_view_system->display();\n\t\tm_imgui_renderer->draw_interface();\n\n\t\t// Measuring the CPU overhead before 'glfwSwapBuffers' because we do not want\n\t\t// to count the VSync as CPU overhead\n\t\tuint64_t cpu_overhead_stop_time = glfwGetTimerValue();\n\n\t\tglfwSwapBuffers(m_glfw_window);\n\t\tTracyGpuCollect;\n\n\t\tfloat delta_time_ms = (glfwGetTimerValue() - frame_start_time) / static_cast<float>(timer_frequency) * 1000.0f;\n\t\tm_application_state->last_CPU_frame_delta_time_ms = delta_time_ms;\n\t\tm_application_state->last_viewport_refresh_timestamp += m_application_state->last_CPU_frame_delta_time_ms;\n\n\t\tif (!is_rendering_done())\n\t\t\tm_application_state->current_render_time_ms += delta_time_ms;\n\n\t\tif (frame_render_done)\n\t\t{\n\t\t\tfloat cpu_overhead_time = (cpu_overhead_stop_time - frame_start_time) / static_cast<float>(timer_frequency) * 1000.0f;\n\t\t\tm_perf_metrics->add_value(RenderWindow::PERF_METRICS_CPU_OVERHEAD_TIME_KEY, cpu_overhead_time);\n\t\t\tm_perf_metrics->add_value(GPURenderer::FULL_FRAME_TIME_WITH_CPU_KEY, cpu_overhead_time + 
m_perf_metrics->get_current_value(GPURenderer::ALL_RENDER_PASSES_TIME_KEY));\n\t\t}\n\n\t\tm_keyboard_interactor.poll_keyboard_inputs();\n\t}\n}\n\nvoid RenderWindow::render()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\n\t// Boolean local to this function to remember whether or not we need to upload\n\t// the frame result to OpenGL for displaying\n\tstatic bool buffer_upload_necessary = true;\n\n\tif (m_renderer->frame_render_done())\n\t{\n\t\t// ------------------------------------------------------------\n\t\t// Everything that is in there is synchronous with the renderer\n\t\t// ------------------------------------------------------------\n\n\t\tm_renderer->download_status_buffers();\n\n\t\tif (m_application_state->GPU_stall_duration_left > 0 && !is_rendering_done())\n\t\t{\n\t\t\t// If we're stalling the GPU.\n\t\t\t// We're whether or not the rendering is done because we don't need to\n\t\t\t// stall the GPU if the rendering is done\n\n\t\t\tif (m_application_state->GPU_stall_duration_left > 0.0f)\n\t\t\t\t// Updating the duration left to stall the GPU.\n\t\t\t\tm_application_state->GPU_stall_duration_left -= m_application_state->last_CPU_frame_delta_time_ms;\n\t\t}\n\t\telse if (!is_rendering_done() || m_application_state->render_dirty)\n\t\t{\n\t\t\t// To save resources, we're only going to update the viewport only so often because\n\t\t\t// it can be a bit expensive and for offline rendering, we don't need an update every\n\t\t\t// frame, we can afford to update only every few samples (or every few seconds) to save\n\t\t\t// resources\n\t\t\tbool needs_refresh = needs_viewport_refresh();\n\t\t\tif (needs_refresh)\n\t\t\t{\n\t\t\t\t// We can unmap the renderer's buffers so that OpenGL can use them for displaying\n\t\t\t\tm_renderer->unmap_buffers();\n\n\t\t\t\t// Update the display view system so that the display view is changed to the\n\t\t\t\t// one that we want to use (in the DisplayViewSystem's 
queue)\n\t\t\t\tm_display_view_system->update_selected_display_view();\n\t\t\t\t\n\t\t\t\t// Denoising to fill the buffers with denoised data (if denoising is enabled)\n\t\t\t\tdenoise();\n\n\t\t\t\t// We upload the data to the OpenGL textures for displaying\n\t\t\t\tm_display_view_system->upload_relevant_buffers_to_texture();\n\n\t\t\t\t// We want the next frame to be displayed with the same 'wants_render_low_resolution' setting\n\t\t\t\t// as it was queued with. This is only useful for first frames when getting in low resolution\n\t\t\t\t// (when we start moving the camera for example) or first frames when getting out of low resolution\n\t\t\t\t// (when we stop moving the camera). In such situations, the last kernel launch in the GPU queue is\n\t\t\t\t// a \"first frame\" that was queued with the corresponding wants_render_low_resolution (getting in or out of low resolution).\n\t\t\t\t// and so we want to display it the same way.\n\t\t\t\tm_display_view_system->set_render_low_resolution(m_renderer->was_last_frame_low_resolution());\n\t\t\t\t// Updating the uniforms so that next time we display, we display correctly\n\t\t\t\tm_display_view_system->update_current_display_program_uniforms();\n\n\t\t\t\t// We just displayed so let's reset the timer\n\t\t\t\tm_application_state->last_viewport_refresh_timestamp = glfwGetTimerValue();\n\n\t\t\t\t// We just refreshed so we're clearing the flag\n\t\t\t\tm_application_state->force_viewport_refresh = false;\n\t\t\t}\n\n\t\t\t// We got a frame rendered --> We can compute the samples per second\n\t\t\tm_application_state->samples_per_second = compute_samples_per_second();\n\n\t\t\t// Adding the time for *one* sample to the performance metrics counter\n\t\t\tif (!m_renderer->was_last_frame_low_resolution() && m_application_state->samples_per_second > 0.0f)\n\t\t\t\tm_renderer->update_perf_metrics(m_perf_metrics);\n\n\t\t\trender_settings.wants_render_low_resolution = is_interacting();\n\t\t\tbool samples_per_frame_auto_mode = 
m_application_settings->auto_sample_per_frame;\n\t\t\tbool current_or_last_frame_low_res = render_settings.do_render_low_resolution() || m_renderer->was_last_frame_low_resolution();\n\t\t\tbool using_debug_kernel = m_renderer->is_using_debug_kernel();\n\t\t\tif ((samples_per_frame_auto_mode && current_or_last_frame_low_res && render_settings.accumulate)\n\t\t\t\t|| using_debug_kernel)\n\t\t\t\t// Only one sample when low resolution rendering.\n\t\t\t\t// \n\t\t\t\t// Also, we only want to apply this if we're accumulating. If we're not accumulating, \n\t\t\t\t// (so we have the renderer in \"interactive mode\") we may want more than 1 sample per frame\n\t\t\t\t// to experiment\n\t\t\t\trender_settings.samples_per_frame = 1;\n\t\t\telse if (m_application_settings->auto_sample_per_frame)\n\t\t\t\t// Otherwise and if the user is using auto samples per frame, we're going to compute\n\t\t\t\t// the appropriate number of samples per frame to use such that the GPU renders a frame\n\t\t\t\t// \"exactly\" as fast as the 'm_application_settings->target_GPU_framerate'\n\t\t\t\t//\n\t\t\t\t// This is to keep the GPU busy and improve rendering performance\n\t\t\t\trender_settings.samples_per_frame = std::min(std::max(1, static_cast<int>(m_application_state->samples_per_second / m_application_settings->target_GPU_framerate)), 65536);\n\n\t\t\tif (m_application_state->render_dirty)\n\t\t\t\treset_render();\n\n\t\t\tm_application_state->GPU_stall_duration_left = compute_GPU_stall_duration();\n\t\t\tm_application_state->interacting_last_frame = is_interacting();\n\n\t\t\t// Queuing a new frame for the GPU to render\n\t\t\tuint64_t current_timestamp = glfwGetTimerValue();\n\t\t\tfloat delta_time_gpu = (current_timestamp - m_application_state->last_GPU_submit_time) / static_cast<float>(glfwGetTimerFrequency()) * 1000.0f;\n\n\t\t\tm_application_state->frame_number++;\n\t\t\tm_application_state->last_GPU_submit_time = current_timestamp;\n\n\t\t\tm_renderer->render(delta_time_gpu, 
this);\n\n\t\t\tbuffer_upload_necessary = true;\n\t\t}\n\t\telse // The rendering is done\n\t\t{\n\t\t\tbuffer_upload_necessary |= m_display_view_system->update_selected_display_view();\n\n\t\t\tif (m_application_settings->enable_denoising)\n\t\t\t{\n\t\t\t\t// We may still want to denoise on the final frame\n\t\t\t\tif (denoise())\n\t\t\t\t\tbuffer_upload_necessary = true;\n\t\t\t}\n\n\t\t\tif (buffer_upload_necessary)\n\t\t\t{\n\t\t\t\t// Re-uploading only if necessary\n\t\t\t\tm_display_view_system->upload_relevant_buffers_to_texture();\n\n\t\t\t\tbuffer_upload_necessary = false;\n\t\t\t}\n\n\t\t\tm_display_view_system->set_render_low_resolution(m_renderer->was_last_frame_low_resolution());\n\t\t\t// Updating the uniforms if the user touches the post processing parameters\n\t\t\t// or something else (denoiser blend, ...)\n\t\t\tm_display_view_system->update_current_display_program_uniforms();\n\n\t\t\tRendererAnimationState& renderer_animation_state = m_renderer->get_animation_state();\n\t\t\tif (renderer_animation_state.is_rendering_frame_sequence && renderer_animation_state.frames_rendered_so_far < renderer_animation_state.number_of_animation_frames)\n\t\t\t{\n\t\t\t\t// If we're rendering an animation and the frame just converged\n\t\t\t\trenderer_animation_state.ensure_output_folder_exists();\n\t\t\t\tm_screenshoter->write_to_png(renderer_animation_state.get_frame_filepath());\n\t\t\t\t// Indicating that the animations can step forward since we're done\n\t\t\t\t// with this frame\n\t\t\t\trenderer_animation_state.frames_rendered_so_far++;\n\t\t\t\tif (renderer_animation_state.frames_rendered_so_far == renderer_animation_state.number_of_animation_frames)\n\t\t\t\t\t// We just rendered the last frame, deactivating rendering frame sequence state\n\t\t\t\t\trenderer_animation_state.is_rendering_frame_sequence = false;\n\t\t\t\telse\n\t\t\t\t{\n\t\t\t\t\t// Not the last frame\n\t\t\t\t\trenderer_animation_state.can_step_animation = 
true;\n\n\t\t\t\t\tset_render_dirty(true);\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t\t// Sleeping so that we don't burn the CPU and GPU with the UI drawing\n\t\t\tstd::this_thread::sleep_for(std::chrono::milliseconds(3));\n\t\t}\n\t}\n}\n\nbool RenderWindow::denoise()\n{\n\tHIPRTRenderSettings& render_settings = m_renderer->get_render_settings();\n\tDisplaySettings& display_settings = m_display_view_system->get_display_settings();\n\n\tdisplay_settings.blend_override = -1.0f;\n\n\tif (m_application_settings->enable_denoising)\n\t{\n\t\t// Evaluating all the conditions for whether or not we want to denoise\n\t\t// the current color framebuffer and whether or not we want to display\n\t\t// the denoised framebuffer to the viewport (we may want NOT to display\n\t\t// the denoised framebuffer if we're only denoising when the render is done\n\t\t// but the render isn't done yet. That's just one example)\n\n\n\n\t\t// ---- Utility variables ----\n\t\t// Do we want to denoise only when the rendering is done?\n\t\tbool denoise_when_done = m_application_settings->denoise_when_rendering_done;\n\t\t// Is the rendering done?\n\t\tbool rendering_done = is_rendering_done();\n\t\t// Whether or not we've already denoised the framebuffer after the rendering is done.\n\t\t// This is to avoid denoising again and again the framebuffer when the rendering is done (because that would just be using the machine for nothing)\n\t\tbool final_frame_denoised_already = !m_application_settings->denoiser_settings_changed && rendering_done && m_application_settings->last_denoised_sample_count == render_settings.sample_number;\n\n\n\n\t\t// ---- Conditions for denoising / displaying noisy ----\n\t\t// - Is the rendering done \n\t\t// - And we only want to denoise when the rendering is done\n\t\t// - And we haven't already denoised the final frame\n\t\tbool denoise_rendering_done = rendering_done && denoise_when_done && !final_frame_denoised_already;\n\t\t// Have we rendered enough samples since last 
time we denoised that we need to denoise again?\n\t\tbool sample_skip_threshold_reached = !denoise_when_done && (render_settings.sample_number - std::max(0, m_application_settings->last_denoised_sample_count) >= m_application_settings->denoiser_sample_skip);\n\t\t// We're also going to denoise if we changed the denoiser settings\n\t\t// (because we need to denoise to reflect the new settings)\n\t\tbool denoiser_settings_changed = m_application_settings->denoiser_settings_changed;\n\n\n\n\n\t\tbool need_denoising = false;\n\t\tbool display_noisy = false;\n\n\t\t// Denoise if:\n\t\t//\t- The render is done and we're denoising when the render \n\t\t//\t- We have rendered enough samples since the last denoise step that we need to denoise again\n\t\t//\t- We're not denoising if we're interacting (moving the camera)\n\t\tneed_denoising |= denoise_rendering_done;\n\t\tneed_denoising |= sample_skip_threshold_reached;\n\t\tneed_denoising |= denoiser_settings_changed;\n\t\tneed_denoising &= !is_interacting();\n\n\t\t// Display the noisy framebuffer if: \n\t\t//\t- We only denoise when the rendering is done but it isn't done yet\n\t\t//\t- We want to denoise every m_application_settings->denoiser_sample_skip samples\n\t\t//\t\tbut we haven't even reached that number yet. 
We're displaying the noisy framebuffer in the meantime\n\t\t//\t- We're moving the camera\n\t\tdisplay_noisy |= !rendering_done && denoise_when_done;\n\t\tdisplay_noisy |= !sample_skip_threshold_reached && m_application_settings->last_denoised_sample_count == -1 && !rendering_done;\n\t\tdisplay_noisy |= is_interacting();\n\n\t\tif (need_denoising)\n\t\t{\n\t\t\tfloat denoise_duration = 0.0f;\n\t\t\tif (m_application_settings->denoiser_use_interop_buffers)\n\t\t\t\tdenoise_duration = denoise_interop_buffers();\n\t\t\telse\n\t\t\t\tdenoise_duration = denoise_no_interop_buffers();\n\n\t\t\tm_application_settings->last_denoised_duration = denoise_duration;\n\t\t\tm_application_settings->last_denoised_sample_count = render_settings.sample_number;\n\t\t}\n\n\t\tif (display_noisy)\n\t\t\t// We need to display the noisy framebuffer so we're forcing the blending factor to 0.0f to only\n\t\t\t// choose the first view out of the two that are going to be blend (and the first view is the noisy view)\n\t\t\tdisplay_settings.blend_override = 0.0f;\n\n\t\tm_application_settings->denoiser_settings_changed = false;\n\n\t\treturn need_denoising && !display_noisy;\n\t}\n\n\treturn false;\n}\n\nfloat RenderWindow::denoise_interop_buffers()\n{\n\tstd::shared_ptr<OpenGLInteropBuffer<float3>> normals_buffer = nullptr;\n\tstd::shared_ptr<OpenGLInteropBuffer<ColorRGB32F>> albedo_buffer = nullptr;\n\n\tif (m_application_settings->denoiser_use_normals)\n\t\tnormals_buffer = m_renderer->get_denoiser_normals_AOV_interop_buffer();\n\n\tif (m_application_settings->denoiser_use_albedo)\n\t\talbedo_buffer = m_renderer->get_denoiser_albedo_AOV_interop_buffer();\n\n\tauto start = std::chrono::high_resolution_clock::now();\n\tm_denoiser->denoise(m_renderer->get_color_interop_framebuffer(), normals_buffer, albedo_buffer);\n\tauto stop = std::chrono::high_resolution_clock::now();\n\n\tm_denoiser->copy_denoised_data_to_buffer(m_renderer->get_denoised_interop_framebuffer());\n\n\treturn 
std::chrono::duration_cast<std::chrono::microseconds>(stop - start).count();\n}\n\nfloat RenderWindow::denoise_no_interop_buffers()\n{\n\tstd::shared_ptr<OrochiBuffer<float3>> normals_buffer = nullptr;\n\tstd::shared_ptr<OrochiBuffer<ColorRGB32F>> albedo_buffer = nullptr;\n\n\tif (m_application_settings->denoiser_use_normals)\n\t\tnormals_buffer = m_renderer->get_denoiser_normals_AOV_no_interop_buffer();\n\n\tif (m_application_settings->denoiser_use_albedo)\n\t\talbedo_buffer = m_renderer->get_denoiser_albedo_AOV_no_interop_buffer();\n\n\tauto start = std::chrono::high_resolution_clock::now();\n\tm_denoiser->denoise(m_renderer->get_color_interop_framebuffer(), normals_buffer, albedo_buffer);\n\tauto stop = std::chrono::high_resolution_clock::now();\n\n\tm_denoiser->copy_denoised_data_to_buffer(m_renderer->get_denoised_interop_framebuffer());\n\n\treturn std::chrono::duration_cast<std::chrono::microseconds>(stop - start).count();\n}\n"
  },
  {
    "path": "src/UI/RenderWindow.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef RENDER_WINDOW_H\n#define RENDER_WINDOW_H\n\n#include \"OpenGL/OpenGLProgram.h\"\n#include \"Renderer/OpenImageDenoiser.h\"\n#include \"Renderer/GPURenderer.h\"\n#include \"Renderer/Baker/GPUBaker.h\"\n#include \"UI/ApplicationSettings.h\"\n#include \"UI/ApplicationState.h\"\n#include \"UI/DisplayView/DisplayTextureType.h\"\n#include \"UI/DisplayView/DisplayViewEnum.h\"\n#include \"UI/DisplayView/DisplayViewSystem.h\"\n#include \"UI/ImGui/ImGuiRenderer.h\"\n#include \"UI/PerformanceMetricsComputer.h\"\n#include \"UI/Interaction/RenderWindowKeyboardInteractor.h\"\n#include \"UI/Interaction/RenderWindowMouseInteractor.h\"\n#include \"UI/Screenshoter.h\"\n#include \"Utils/CommandlineArguments.h\"\n\n#include \"GL/glew.h\"\n#include \"GLFW/glfw3.h\"\n\nclass RenderWindow\n{\npublic:\n\tstatic const std::string PERF_METRICS_CPU_OVERHEAD_TIME_KEY;\n\n\tRenderWindow(int width, int height, std::shared_ptr<HIPRTOrochiCtx> hiprt_oro_ctx);\n\t~RenderWindow();\n\n\tvoid init_glfw(int window_width, int window_height);\n\tvoid init_gl(int width, int height);\n\n\tstatic void APIENTRY gl_debug_output_callback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, const void* userParam);\n\tvoid resize(int pixels_width, int pixels_height);\n\tvoid change_resolution_scaling(float new_scaling);\n\n\tint get_width();\n\tint get_height();\n\tbool is_interacting();\n\n\tRenderWindowKeyboardInteractor& get_keyboard_interactor();\n\tstd::shared_ptr<RenderWindowMouseInteractor> get_mouse_interactor();\n\n\tstd::shared_ptr<ApplicationSettings> get_application_settings();\n\tstd::shared_ptr<GPURenderer> get_renderer();\n\tstd::shared_ptr<GPUBaker> get_baker();\n\tstd::shared_ptr<OpenImageDenoiser> get_denoiser();\n\tstd::shared_ptr<PerformanceMetricsComputer> 
get_performance_metrics();\n\tstd::shared_ptr<Screenshoter> get_screenshoter();\n\tstd::shared_ptr<ImGuiRenderer> get_imgui_renderer();\n\n\tstd::shared_ptr<DisplayViewSystem> get_display_view_system();\n\n\t/**\n\t * Translates the camera along its X and Y axis by translation_x and translation_y respectively.\n\t * \n\t * If scale_translation is true, translation_x and translation_y are scaled by the delta time of\n\t * the application and the camera speed before the translation is applied to the camera. You may\n\t * want to set scale_translation to false when translation the camera with a mouse pan and scale it\n\t * manually by a predefined arbitrary factor\n \t */\n\tvoid update_renderer_view_translation(float translation_x, float translation_y, bool scale_translation);\n\tvoid update_renderer_view_zoom(float offset, bool scale_delta_time);\n\tvoid update_renderer_view_rotation(float offset_x, float offset_y);\n\n\t/**\n\t * Returns true if the renderer is not sampling the image anymore. \n\t * This can be the case if all pixels have converged according to\n\t * adaptive sampling or if the maximum number of samples specified by\n\t * the user has been reached or if the maximum render time has been reached, etc...\n\t */\n\tbool is_rendering_done();\n\tbool needs_viewport_refresh();\n\t/**\n\t * Computes how long to wait between each viewport\n\t * refresh given the current render time\n\t * \n\t * For a total current render time less than 1s, this returns 0.0f for example, \n\t * meaning that the viewport should always immediately be refreshed.\n\t * \n\t * ...\n\t * \n\t * See the function for the rest of the delays\n\t */\n\tfloat get_viewport_refresh_delay_ms();\n\t/**\n\t * Returns the time in milliseconds before the next viewport refresh when rendering offline.\n\t * \n\t * Returns 0.0f if not rendering offline (i.e. 
rendering realtime)\n\t */\n\tfloat get_time_ms_before_viewport_refresh();\n\tvoid reset_render();\n\tvoid set_render_dirty(bool render_dirty);\n\t/**\n\t * When offline rendering, the viewport isn't always refreshed at each frame because \n\t * refreshing consumes resources \n\t */\n\tvoid set_force_viewport_refresh(bool force_viewport_refresh);\n\n\t/**\n\t * Sets the status text of the ImGui UI to indicate some kind of information to the user.\n\t */\n\tvoid set_ImGui_status_text(const std::string& status_text);\n\tvoid clear_ImGui_status_text();\n\n\tfloat get_current_render_time();\n\tfloat get_samples_per_second();\n\t/**\n\t * Computes the number of samples per second as seen from the render window. \"As seen by the render window\"\n\t * means that the GPU stall percentage is taken into account for example\n\t */\n\tfloat compute_samples_per_second();\n\t/**\n\t * Returns how long the GPU has to stall for before next frame according to the application settings\n\t */\n\tfloat compute_GPU_stall_duration();\n\n\t/**\n\t * Returns the time in milliseconds of the last UI frame (decoupled from the renderer's frame)\n\t */\n\tfloat get_UI_delta_time();\n\n\tvoid run();\n\tvoid render();\n\t/**\n\t * Denoises the color framebuffer if necessary (according to ImGui\n\t * parameters such as denoiser sample skip, only denoise when rendering done, ...)\n\t * \n\t * Returns true if denoising occurred and if the application needs to display the denoised data.\n\t * False otherwise.\n\t * \n\t * That return value can be used to decide whether or not we need to upload the denoised data\n\t * to OpenGL or not (we need to upload it if something new was denoised AND the application\n\t * wants to display the denoised data. 
This condition corresponds exactly to the returned value)\n\t */\n\tbool denoise();\n\n\t// TODO: why is this not in linux mouse interactor only and why is it in renderwindow ?\n\tstd::pair<float, float> get_cursor_position()\n\t{\n\t\treturn m_cursor_position;\n\t}\n\n\tvoid set_cursor_position(std::pair<float, float> new_cursor_position)\n\t{\n\t\tm_cursor_position = new_cursor_position;\n\t}\n\nprivate:\n\tint m_viewport_width, m_viewport_height;\n\n\t/**\n\t * Denoises the renderer framebuffer using the OpenGL interop buffers.\n\t *\n\t * Using the OpenGL interop buffers greatly improves the performance of the\n\t * denoiser but they seem to be a bit slower to manipulate from the path\n\t * tracing shaders --> a bit slower path tracing performance.\n\t * \n\t * This may only be true on AMD GPUs though where the OpenGL interop driver implementation\n\t * seems to be pretty bad\n\t * \n\t * This function should not be called directly. ::denoise() should be called\n\t * and it will take care of calling the right function depending on the value \n\t * of ApplicationSettings::denoiser_use_interop_buffers\n\t */\n\tfloat denoise_interop_buffers();\n\n\t/**\n\t * Denoises the renderer framebuffer not using the OpenGL interop buffers.\n\t * \n\t * This makes the denoising more expensive but increases path tracing performance\n\t * a bit (on AMD GPUs at least)\n\t * \n\t * This function should not be called directly. 
::denoise() should be called\n\t * and it will take care of calling the right function depending on the value \n\t * of ApplicationSettings::denoiser_use_interop_buffers\n\t */\n\tfloat denoise_no_interop_buffers();\n\n\t// All the settings of the application (that can, for the most part, be controlled\n\t// through ImGui)\n\tstd::shared_ptr<ApplicationSettings> m_application_settings;\n\tstd::shared_ptr<ApplicationState> m_application_state;\n\n\tstd::shared_ptr<GPURenderer> m_renderer;\n\tstd::shared_ptr<GPUBaker> m_gpu_baker;\n\tstd::shared_ptr<OpenImageDenoiser> m_denoiser;\n\tstd::shared_ptr<PerformanceMetricsComputer> m_perf_metrics;\n\tstd::shared_ptr<Screenshoter> m_screenshoter;\n\n\t// Encapsulates the handling of display views\n\tstd::shared_ptr<DisplayViewSystem> m_display_view_system;\n\n\tGLFWwindow* m_glfw_window;\n\tstd::shared_ptr<ImGuiRenderer> m_imgui_renderer;\n\n\tstd::shared_ptr<RenderWindowMouseInteractor> m_mouse_interactor;\n\tRenderWindowKeyboardInteractor m_keyboard_interactor;\n\n\tstd::pair<float, float> m_cursor_position;\n};\n\n#endif\n"
  },
  {
    "path": "src/UI/Screenshoter.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"GL/glew.h\"\n#include \"stb_image_write.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"UI/RenderWindow.h\"\n#include \"UI/Screenshoter.h\"\n#include \"Utils/Utils.h\"\n\nextern ImGuiLogger g_imgui_logger;\n\nScreenshoter::Screenshoter()\n{\n\tstd::vector<std::string> macro = { \"#define COMPUTE_SCREENSHOTER\" };\n\n\tOpenGLShader default_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/default_display.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader blend_2_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/blend_2_display.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader normal_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/normal_display.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader albedo_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/albedo_display.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader adaptive_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/heatmap_int.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader pixel_converged_map_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/boolmap_int.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\tOpenGLShader white_furnace_display_shader = OpenGLShader(GLSL_SHADERS_DIRECTORY \"/white_furnace_threshold.frag\", OpenGLShader::COMPUTE_SHADER, macro);\n\n\tstd::shared_ptr<OpenGLProgram> default_display_program = std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> blend_2_display_program = std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> normal_display_program = std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> albedo_display_program = std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> pixel_convergence_heatmap_display_program = 
std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> pixel_converged_map_display_program = std::make_shared<OpenGLProgram>();\n\tstd::shared_ptr<OpenGLProgram> white_furnace_display_program = std::make_shared<OpenGLProgram>();\n\n\tdefault_display_program->attach(default_display_shader);\n\tdefault_display_program->link();\n\n\tblend_2_display_program->attach(blend_2_display_shader);\n\tblend_2_display_program->link();\n\n\tnormal_display_program->attach(normal_display_shader);\n\tnormal_display_program->link();\n\n\talbedo_display_program->attach(albedo_display_shader);\n\talbedo_display_program->link();\n\n\tpixel_convergence_heatmap_display_program->attach(adaptive_display_shader);\n\tpixel_convergence_heatmap_display_program->link();\n\n\tpixel_converged_map_display_program->attach(pixel_converged_map_shader);\n\tpixel_converged_map_display_program->link();\n\n\twhite_furnace_display_program->attach(white_furnace_display_shader);\n\twhite_furnace_display_program->link();\n\n\tm_compute_programs[DisplayViewType::DEFAULT] = default_display_program;\n\tm_compute_programs[DisplayViewType::GMON_BLEND] = blend_2_display_program;\n\tm_compute_programs[DisplayViewType::DENOISED_BLEND] = blend_2_display_program;\n\tm_compute_programs[DisplayViewType::DISPLAY_DENOISER_ALBEDO] = albedo_display_program;\n\tm_compute_programs[DisplayViewType::DISPLAY_DENOISER_NORMALS] = normal_display_program;\n\tm_compute_programs[DisplayViewType::PIXEL_CONVERGENCE_HEATMAP] = pixel_convergence_heatmap_display_program;\n\tm_compute_programs[DisplayViewType::PIXEL_CONVERGED_MAP] = pixel_converged_map_display_program;\n\tm_compute_programs[DisplayViewType::WHITE_FURNACE_THRESHOLD] = white_furnace_display_program;\n\n\tselect_compute_program(DisplayViewType::DEFAULT);\n}\n\nvoid Screenshoter::set_renderer(std::shared_ptr<GPURenderer> renderer)\n{\n\tm_renderer = renderer;\n}\n\nvoid Screenshoter::set_render_window(RenderWindow* render_window)\n{\n\tm_render_window = 
render_window;\n}\n\nvoid Screenshoter::select_compute_program(DisplayViewType display_view)\n{\n\tm_active_compute_program = m_compute_programs[display_view];\n}\n\nvoid Screenshoter::write_to_png()\n{\n\tstd::stringstream filename;\n\n\tUtils::get_current_date_string(filename);\n\tfilename << \" - \" << m_renderer->get_render_settings().sample_number << \"spp - \" << m_render_window->get_current_render_time() / 1000.0f << \"s\" << \".png\";\n\n\twrite_to_png(filename.str().c_str());\n}\n\nvoid Screenshoter::resize_output_image(int width, int height)\n{\n\tbool texture_needs_creation = false;\n\tif (m_compute_output_image_width == -1)\n\t\ttexture_needs_creation = true;\n\telse if (m_compute_output_image_width != width || m_compute_output_image_height != height)\n\t{\n\t\tglDeleteTextures(1, &m_output_image);\n\t\ttexture_needs_creation = true;\n\t}\n\n\tif (texture_needs_creation)\n\t{\n\t\tm_compute_output_image_width = width;\n\t\tm_compute_output_image_height = height;\n\n\t\tglGenTextures(1, &m_output_image);\n\t\tglActiveTexture(GL_TEXTURE0 + DisplayViewSystem::DISPLAY_COMPUTE_IMAGE_UNIT);\n\t\tglBindTexture(GL_TEXTURE_2D, m_output_image);\n\t\tglTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA8UI, width, height);\n\t\tglTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0);\n\t\tglBindImageTexture(/* location in the shader */ 2, m_output_image, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8UI);\n\t}\n\telse\n\t{\n\t\tglActiveTexture(GL_TEXTURE0 + DisplayViewSystem::DISPLAY_COMPUTE_IMAGE_UNIT);\n\t\tglBindTexture(GL_TEXTURE_2D, m_output_image);\n\t}\n}\n\nvoid Screenshoter::write_to_png(std::string filepath)\n{\n\tImage8Bit image = get_image();\n\tif (image.write_image_png(filepath.c_str(), true))\n\t\tg_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Screenshot written to \\\"%s\\\"\", filepath.c_str());\n}\n\nImage8Bit Screenshoter::get_image()\n{\n\tint width = m_renderer->m_render_resolution.x;\n\tint height = m_renderer->m_render_resolution.y;\n\n\t// 
We're using OpenGL compute shader here and not an HIP kernel because we want to be able to use the same \n\t// fragment shader files that we use for the displaying. If we were doing the post-processing with an HIP kernel, \n\t// we would have to write HIP kernels that would basically be copy-pasting of the OpenGL display shaders \n\t// with just some syntax changes. That would basically mean duplicating code which would be annoying to \n\t// maintain because we would have to update the HIP kernels every time we changed the OpenGL display shader \n\t// so that the screenshoter outputs the correct image (and needless to say that we would forget, most of the time, \n\t// to update the HIP kernels so that's why code duplication here is annoying)\n\n\tm_renderer->synchronize_all_kernels();\n\tm_renderer->unmap_buffers();\n\t// We upload the data to the OpenGL textures for displaying\n\tm_render_window->get_display_view_system()->upload_relevant_buffers_to_texture();\n\n\tresize_output_image(width, height);\n\tselect_compute_program(m_render_window->get_display_view_system()->get_current_display_view_type());\n\n\tGLint threads[3];\n\tm_active_compute_program->get_compute_threads(threads);\n\n\tint nb_groups_x = std::ceil(width / (float)threads[0]);\n\tint nb_groups_y = std::ceil(height / (float)threads[1]);\n\n\tDisplayViewSystem::update_display_program_uniforms(m_render_window->get_display_view_system().get(), m_active_compute_program, m_renderer, m_render_window->get_application_settings());\n\n\tglDispatchCompute(nb_groups_x, nb_groups_y, 1);\n\tglMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);\n\n\tstd::vector<unsigned char> mapped_data(width * height * 4);\n\tglGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA_INTEGER, GL_UNSIGNED_BYTE, mapped_data.data());\n\n\treturn Image8Bit(mapped_data, width, height, 4);\n}\n\n"
  },
  {
    "path": "src/UI/Screenshoter.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef SCREENSHOTER_H\n#define SCREENSHOTER_H\n\n#include \"GL/glew.h\"\n#include \"OpenGL/OpenGLProgram.h\"\n#include \"Renderer/GPURenderer.h\"\n\n#include <unordered_map>\n\nclass RenderWindow;\n\nclass Screenshoter\n{\npublic:\n\tScreenshoter();\n\n\tvoid set_renderer(std::shared_ptr<GPURenderer> renderer);\n\tvoid set_render_window(RenderWindow* render_window);\n\n\tvoid select_compute_program(DisplayViewType display_view);\n\tvoid resize_output_image(int width, int height);\n\n\n\t/**\n\t * A filename with a time stamp, the render resolution and the\n\t * number of samples is automatically generated:\n\t * \n\t * 03.17.2024 1024sp @ 1280x720.png\n\t * \n\t * for example\n\t */\n\tvoid write_to_png();\n\tvoid write_to_png(std::string filepath);\n\n\tImage8Bit get_image();\n\nprivate:\n\tstd::shared_ptr<GPURenderer> m_renderer = nullptr;\n\tRenderWindow* m_render_window = nullptr;\n\n\t/**\n\t * Compute shader program to use for the screenshot given a certain display view type\n\t */\n\tstd::unordered_map<DisplayViewType, std::shared_ptr<OpenGLProgram>> m_compute_programs;\n\t/**\n\t * Compute shader program currently used for the screenshots\n\t */\n\tstd::shared_ptr<OpenGLProgram> m_active_compute_program;\n\n\tGLuint m_output_image = 0;\n\tint m_compute_output_image_width = -1;\n\tint m_compute_output_image_height = -1;\n};\n\n#endif"
  },
  {
    "path": "src/Utils/CommandlineArguments.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Utils/CommandlineArguments.h\"\n\nconst std::string CommandlineArguments::DEFAULT_SCENE = DATA_DIRECTORY \"/GLTFs/the-white-room-low.gltf\";\nconst std::string CommandlineArguments::DEFAULT_SKYSPHERE = DATA_DIRECTORY \"/Skyspheres/evening_road_01_puresky_2k.hdr\";\n\nCommandlineArguments CommandlineArguments::process_command_line_args(int argc, char** argv)\n{\n    CommandlineArguments arguments;\n\n    for (int i = 1; i < argc; i++)\n    {\n        std::string string_argv = std::string(argv[i]);\n        if (string_argv.starts_with(\"--sky=\"))\n            arguments.skysphere_file_path = string_argv.substr(6);\n        else if (string_argv.starts_with(\"--samples=\"))\n            arguments.render_samples = std::atoi(string_argv.substr(10).c_str());\n        else if (string_argv.starts_with(\"--bounces=\"))\n            arguments.bounces = std::atoi(string_argv.substr(10).c_str());\n        else if (string_argv.starts_with(\"--w=\"))\n            arguments.render_width = std::atoi(string_argv.substr(4).c_str());\n        else if (string_argv.starts_with(\"--width=\"))\n            arguments.render_width = std::atoi(string_argv.substr(8).c_str());\n        else if (string_argv.starts_with(\"--h=\"))\n            arguments.render_height = std::atoi(string_argv.substr(4).c_str());\n        else if (string_argv.starts_with(\"--height=\"))\n            arguments.render_height = std::atoi(string_argv.substr(9).c_str());\n        else\n            //Assuming scene file path\n            arguments.scene_file_path = string_argv;\n    }\n\n    return arguments;\n}\n"
  },
  {
    "path": "src/Utils/CommandlineArguments.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef COMMANDLINE_ARGUMENTS_H\n#define COMMANDLINE_ARGUMENTS_H\n\n#include <iostream>\n\nstruct CommandlineArguments\n{\n    static const std::string DEFAULT_SCENE;\n    static const std::string DEFAULT_SKYSPHERE;\n\n    static CommandlineArguments process_command_line_args(int argc, char** argv);\n\n    int render_width = 1280, render_height = 720;\n\n    // Default scene and skysphere paths as expected if running the application from a build\n    // directory inside the repo root folder\n    std::string scene_file_path = DEFAULT_SCENE;\n    std::string skysphere_file_path = DEFAULT_SKYSPHERE;\n\n    int render_samples = 64;\n    int bounces = 8;\n};\n\n#endif\n"
  },
  {
    "path": "src/Utils/Utils.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"stb_image.h\"\n\n#include \"Image/Image.h\"\n#include \"UI/ImGui/ImGuiLogger.h\"\n#include \"Utils/Utils.h\"\n#include \"FLIP.h\"\n#include \"clip.h\"\n\n#include <deque>\n#include <format>\n#include <iostream>\n#include <iomanip> // get_current_date_string()\n#include <OpenImageDenoise/oidn.hpp>\n#include <sstream>\n#include <string>\n#include <string_view>\n\n#if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__)\n#include <Windows.h> // for is_file_on_SSD() and other functions\n#endif\n\nextern ImGuiLogger g_imgui_logger;\n\nstd::vector<unsigned char> Utils::tonemap_hdr_image(const Image32Bit& hdr_image, int sample_number, float gamma, float exposure)\n{\n    return tonemap_hdr_image(reinterpret_cast<const float*>(hdr_image.data().data()), hdr_image.width * hdr_image.height * 3, sample_number, gamma, exposure);\n}\n\nstd::vector<unsigned char> Utils::tonemap_hdr_image(const std::vector<ColorRGB32F>& hdr_image, int sample_number, float gamma, float exposure)\n{\n    return tonemap_hdr_image(reinterpret_cast<const float*>(hdr_image.data()), hdr_image.size() * 3, sample_number, gamma, exposure);\n}\n\nstd::vector<unsigned char> Utils::tonemap_hdr_image(const std::vector<float>& hdr_image, int sample_number, float gamma, float exposure)\n{\n    return tonemap_hdr_image(hdr_image.data(), hdr_image.size(), sample_number, gamma, exposure);\n}\n\nstd::vector<unsigned char> Utils::tonemap_hdr_image(const float* hdr_image, size_t float_count, int sample_number, float gamma, float exposure)\n{\n    std::vector<unsigned char> tonemapped_data(float_count);\n\n#pragma omp parallel for\n    for (int i = 0; i < float_count; i += 3)\n    {\n        ColorRGB32F pixel = ColorRGB32F(hdr_image[i + 0], hdr_image[i + 1], hdr_image[i + 2]) / static_cast<float>(sample_number);\n        ColorRGB32F tone_mapped = 
ColorRGB32F(1.0f, 1.0f, 1.0f) - exp(-pixel * exposure);\n        ColorRGB32F gamma_corrected = pow(tone_mapped, 1.0f / gamma);\n\n        tonemapped_data[i + 0] = gamma_corrected.r * 255.0f;\n        tonemapped_data[i + 1] = gamma_corrected.g * 255.0f;\n        tonemapped_data[i + 2] = gamma_corrected.b * 255.0f;\n    }\n\n    return tonemapped_data;\n}\n\nvoid Utils::compute_alias_table(const std::vector<float>& input, float in_input_total_sum, std::vector<float>& out_probas, std::vector<int>& out_alias)\n{\n    if (input.size() == 0)\n        return;\n\n    // TODO try using floats here to reduce memory usage during the construction and see if precision is an issue or not\n\n    // A vector of the luminance of all the pixels of the envmap\n    // normalized such that the average of the elements of this vector is 'width*height'\n    double input_total_sum_double = in_input_total_sum;\n    std::vector<double> normalized_elements(input.size());\n    for (int i = 0; i < input.size(); i++)\n    {\n        // Normalized\n        normalized_elements[i] = static_cast<double>(input[i]) / input_total_sum_double;\n\n        // Scale for alias table construction such that the average of\n        // the elements is 1\n        normalized_elements[i] *= input.size();\n    }\n\n    out_probas.resize(input.size());\n    out_alias.resize(input.size());\n\n    std::deque<int> smalls;\n    std::deque<int> larges;\n\n    for (int i = 0; i < normalized_elements.size(); i++)\n    {\n        if (normalized_elements[i] < 1.0f)\n            smalls.push_back(i);\n        else\n            larges.push_back(i);\n    }\n\n    while (!smalls.empty() && !larges.empty())\n    {\n        int small_index = smalls.front();\n        int large_index = larges.front();\n\n        smalls.pop_front();\n        larges.pop_front();\n\n        out_probas[small_index] = normalized_elements[small_index];\n        out_alias[small_index] = large_index;\n\n        normalized_elements[large_index] = 
(normalized_elements[large_index] + normalized_elements[small_index]) - 1.0f;\n        if (normalized_elements[large_index] > 1.0f)\n            larges.push_back(large_index);\n        else\n            smalls.push_back(large_index);\n    }\n\n    while (!larges.empty())\n    {\n        int index = larges.front();\n        larges.pop_front();\n\n        out_probas[index] = 1.0f;\n    }\n\n    while (!smalls.empty())\n    {\n        int index = smalls.front();\n        smalls.pop_front();\n\n        out_probas[index] = 1.0f;\n    }\n}\n\nvoid Utils::compute_alias_table(const std::vector<float>& input, std::vector<float>& out_probas, std::vector<int>& out_alias, float* out_input_total_sum)\n{\n    float sum = 0.0f;\n    for (float input_element : input)\n        sum += input_element;\n\n    if (out_input_total_sum)\n        *out_input_total_sum = sum;\n\n    Utils::compute_alias_table(input, sum, out_probas, out_alias);\n}\n\nstd::string Utils::file_to_string(const char* filepath)\n{\n    std::ifstream file(filepath);\n    if (!file.is_open())\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Unable to open file %s\", filepath);\n\n        return std::string();\n    }\n\n    std::stringstream buffer;\n    buffer << file.rdbuf();\n\n    return buffer.str();\n}\n\nvoid Utils::get_current_date_string(std::stringstream& ss)\n{\n\tstd::time_t t = std::time(0);\n\tstd::tm* now = std::localtime(&t);\n\n\tss << std::put_time(now, \"%m.%d.%Y.%H.%M.%S\");\n}\n\nvoid* Utils::get_volume_handle_for_file(const char* filePath)\n{\n#if !defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__WIN32__) // Only defining the code on Windows\n    return nullptr;\n#else\n    char volume_path[MAX_PATH];\n    if (!GetVolumePathName(filePath, volume_path, ARRAYSIZE(volume_path)))\n        return nullptr;\n\n    char volume_name[MAX_PATH];\n    if (!GetVolumeNameForVolumeMountPoint(volume_path,\n        volume_name, ARRAYSIZE(volume_name)))\n        return 
nullptr;\n\n    auto length = strlen(volume_name);\n    if (length && volume_name[length - 1] == L'\\\\')\n        volume_name[length - 1] = L'\\0';\n\n    return CreateFile(volume_name, 0,\n        FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,\n        nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr);\n#endif\n}\n\nbool Utils::is_file_on_ssd(const char* file_path)\n{\n#if !defined(_WIN32) && !defined(_WIN32_WCE) && !defined(__WIN32__)\n    // Not on Windows, haven't written the code to determine that on Linux yet\n    return false;\n#else\n    bool is_ssd{ false };\n    HANDLE volume = get_volume_handle_for_file(file_path);\n    if (volume == INVALID_HANDLE_VALUE)\n    {\n        return false; /*invalid path! throw?*/\n    }\n\n    STORAGE_PROPERTY_QUERY query{};\n    query.PropertyId = StorageDeviceSeekPenaltyProperty;\n    query.QueryType = PropertyStandardQuery;\n    DWORD count;\n    DEVICE_SEEK_PENALTY_DESCRIPTOR result{};\n    if (DeviceIoControl(volume, IOCTL_STORAGE_QUERY_PROPERTY,\n        &query, sizeof(query), &result, sizeof(result), &count, nullptr))\n    {\n        is_ssd = !result.IncursSeekPenalty;\n    }\n    else { /*fails for network path, etc*/ }\n    CloseHandle(volume);\n    return is_ssd;\n#endif\n}\n\n#include \"tinyfiledialogs.h\"\n\nstd::string Utils::open_file_dialog(const char* filter_patterns[], int filter_count)\n{\n    const char* file = tinyfd_openFileDialog(\"let us read the password back\", \"\", filter_count, filter_patterns, NULL, 0);\n    if (file)\n        return std::string(file);\n    else\n        return \"\";\n}\n\nfloat Utils::compute_image_mse(const Image32Bit& reference, const Image32Bit& subject)\n{\n    float mse = 0.0f;\n\n    if (reference.width != subject.width || reference.height != subject.height)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Cannot compute difference between images of different sizes.\");\n        return mse;\n    }\n\n    for (int i = 
0; i < reference.width * reference.height; i++)\n    {\n        ColorRGB32F reference_pixel = reference.get_pixel_ColorRGB32F(i);\n        ColorRGB32F subject_pixel = subject.get_pixel_ColorRGB32F(i);\n\n        float diff_r_2 = hippt::square(reference_pixel.r - subject_pixel.r);\n        float diff_g_2 = hippt::square(reference_pixel.g - subject_pixel.g);\n        float diff_b_2 = hippt::square(reference_pixel.b - subject_pixel.b);\n\n        mse += diff_r_2 + diff_g_2 + diff_b_2;\n    }\n\n    mse /= static_cast<float>(reference.width * reference.height);\n\n    return mse;\n}\n\nfloat Utils::compute_image_rmse(const Image32Bit& reference, const Image32Bit& subject)\n{\n    return sqrtf(Utils::compute_image_mse(reference, subject));\n}\n\nfloat Utils::compute_image_weighted_median_FLIP(const Image32Bit& reference_srgb, const Image32Bit& subject_srgb, float** out_error_map)\n{\n\tfloat mean_flip_error = 0.0f;\n\n    Image32Bit reference = reference_srgb.to_linear_rgb();\n    Image32Bit subject = subject_srgb.to_linear_rgb();\n\n    FLIP::Parameters parameters;\n    FLIP::evaluate(reference.data().data(), subject.data().data(), reference.width, reference.height, false, parameters, true, true, mean_flip_error, out_error_map);\n\n    return mean_flip_error;\n}\n\nvoid Utils::copy_u8_image_data_to_clipboard(const std::vector<unsigned char>& data, int width, int height)\n{\n    clip::image_spec spec;\n    spec.width = width;\n    spec.height = height;\n    spec.bits_per_pixel = 32;\n    spec.bytes_per_row = spec.width * 4;\n    spec.red_mask = 0xff;\n    spec.green_mask = 0xff00;\n    spec.blue_mask = 0xff0000;\n    spec.alpha_mask = 0xff000000;\n    spec.red_shift = 0;\n    spec.green_shift = 8;\n    spec.blue_shift = 16;\n    spec.alpha_shift = 24;\n    clip::image img(data.data(), spec);\n\n    if (!clip::set_image(img))\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Failed to copy image to clipboard.\");\n}\n\nvoid 
Utils::copy_image_to_clipboard(const Image8Bit& image)\n{\n    std::vector<unsigned char> flipped_data(image.width * image.height * 4);\n    for (int y = 0; y < image.height; y++)\n    {\n        for (int x = 0; x < image.width; x++)\n        {\n            int input_index = (x + y * image.width) * image.channels;\n            int output_index = (x + (image.height - 1 - y) * image.width) * 4;\n\n            flipped_data[output_index + 0] = image.data().data()[input_index + 0];\n            flipped_data[output_index + 1] = image.data().data()[input_index + 1];\n            flipped_data[output_index + 2] = image.data().data()[input_index + 2];\n            flipped_data[output_index + 3] = 255;\n        }\n    }\n\n    copy_u8_image_data_to_clipboard(flipped_data, image.width, image.height);\n}\n\nvoid Utils::copy_image_to_clipboard(const Image32Bit& image)\n{\n    std::vector<unsigned char> image_data_8u(image.width * image.height * 4);\n    for (int y = 0; y < image.height; y++)\n    {\n        for (int x = 0; x < image.width; x++)\n        {\n            int input_index = (x + y * image.width) * image.channels;\n            int output_index = (x + (image.height - 1 - y) * image.width) * 4;\n\n            image_data_8u[output_index + 0] = static_cast<unsigned char>(hippt::clamp(0.0f, 1.0f, image.data().data()[input_index + 0]) * 255.0f);\n            image_data_8u[output_index + 1] = static_cast<unsigned char>(hippt::clamp(0.0f, 1.0f, image.data().data()[input_index + 1]) * 255.0f);\n            image_data_8u[output_index + 2] = static_cast<unsigned char>(hippt::clamp(0.0f, 1.0f, image.data().data()[input_index + 2]) * 255.0f);\n            image_data_8u[output_index + 3] = 255;\n        }\n    }\n\n    copy_u8_image_data_to_clipboard(image_data_8u, image.width, image.height);\n}\n\nImage32Bit Utils::OIDN_denoise(const Image32Bit& image, int width, int height, float blend_factor)\n{\n    // Create an Open Image Denoise device\n    static bool device_done = false;\n  
  static oidn::DeviceRef device;\n    if (!device_done)\n    {\n        // We're going to create a CPU device as there seems to be some issues with the GPU (HIP at least)\n        // device on Linux\n        int num_devices = oidnGetNumPhysicalDevices();\n        for (int i = 0; i < num_devices; i++)\n        {\n            if (static_cast<oidn::DeviceType>(oidnGetPhysicalDeviceInt(i, \"type\")) == oidn::DeviceType::CPU)\n            {\n                device = oidn::newDevice(i);\n                if (device.getHandle() == nullptr)\n                {\n                    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"There was an error getting the device for denoising with OIDN. Perhaps some missing libraries for your hardware?\");\n                    return Image32Bit();\n                }\n                device.commit();\n\n                device_done = true;\n            }\n        }\n    }\n\n    if (!device_done)\n    {\n        // If we couldn't make a CPU device, trying GPU\n        device = oidn::newDevice();\n        if (device.getHandle() == nullptr)\n        {\n            g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"There was an error getting the device for denoising with OIDN. 
Perhaps some missing libraries for your hardware?\");\n            return Image32Bit();\n        }\n        device.commit();\n\n        device_done = true;\n    }\n\n    if (!device_done)\n    {\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Cannot create any OIDN device, aborting denoising...\");\n        return Image32Bit();\n    }\n\n\n    // Create buffers for input/output images accessible by both host (CPU) and device (CPU/GPU)\n    oidn::BufferRef colorBuf = device.newBuffer(width * height * 3 * sizeof(float));\n    // Create a filter for denoising a beauty (color) image using optional auxiliary images too\n    // This can be an expensive operation, so try not to create a new filter for every image!\n    static oidn::FilterRef filter = device.newFilter(\"RT\"); // generic ray tracing filter\n    filter.setImage(\"color\", colorBuf, oidn::Format::Float3, width, height); // beauty\n    filter.setImage(\"output\", colorBuf, oidn::Format::Float3, width, height); // denoised beauty\n    filter.set(\"hdr\", true); // beauty image is HDR\n    filter.commit();\n    // Fill the input image buffers\n    float* colorPtr = (float*)colorBuf.getData();\n    for (int y = 0; y < height; y++)\n        for (int x = 0; x < width; x++)\n        {\n            int index = y * width + x;\n\n            colorPtr[index * 3 + 0] = image[index * 3 + 0];\n            colorPtr[index * 3 + 1] = image[index * 3 + 1];\n            colorPtr[index * 3 + 2] = image[index * 3 + 2];\n        }\n    // Filter the beauty image\n\n    filter.execute();\n\n    float* denoised_ptr = (float*)colorBuf.getData();\n    Image32Bit output_image(width, height, 3);\n    ColorRGB32F* output_pixels = output_image.get_data_as_ColorRGB32F();\n    for (int y = 0; y < height; y++)\n        for (int x = 0; x < width; x++)\n        {\n            int index = y * width + x;\n\n            ColorRGB32F color = blend_factor * ColorRGB32F(denoised_ptr[index * 3 + 0], denoised_ptr[index * 3 + 
1], denoised_ptr[index * 3 + 2]) + (1.0f - blend_factor) * image.get_pixel_ColorRGB32F(index);\n\n            output_pixels[index] = color;\n        }\n\n    const char* errorMessage;\n    if (device.getError(errorMessage) != oidn::Error::None)\n        g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_ERROR, \"Error: %s\", errorMessage);\n\n    return output_image;\n}\n\nvoid Utils::debugbreak()\n{\n#if defined( _WIN32 )\n    __debugbreak();\n#elif defined( __GNUC__ )\n    raise(SIGTRAP);\n#else\n    ;\n#endif\n}\n\n#ifdef _WIN32\n// Code from @jpownby from the GraphicsProgramming Discord\nUtils::AddEnvVarError Utils::windows_add_ENV_var_to_PATH(const wchar_t* env_var_name, std::wstring extra_string)\n{\n    // Get $CUDA_PATH\n    // (this code assumes that the path isn't longer than MAX_PATH;\n    // if you want to be robust against a machine configured for long paths\n    // you could use instead use two calls and dynamically allocate the string\n    // as shown below for $PATH)\n    wchar_t envVarBuffer[MAX_PATH] = { L'\\0' };\n    DWORD envVarValueLength_notIncludingNull = 0;\n    {\n        const auto result = GetEnvironmentVariableW(env_var_name, envVarBuffer, MAX_PATH);\n        if (result != 0)\n        {\n            if (result < MAX_PATH)\n                envVarValueLength_notIncludingNull = result;\n            else\n                // $CUDA_PATH is longer than MAX_PATH; the machine must be configured for long paths\n                return AddEnvVarError::ADD_ENV_VAR_ERROR_VALUE_TOO_LONG;\n        }\n        else\n        {\n            const auto errorCode = GetLastError();\n            if (errorCode == ERROR_ENVVAR_NOT_FOUND)\n                // The given environment variable doesn't exist\n                return AddEnvVarError::ADD_ENV_VAR_ERROR_NOT_FOUND;\n            else\n                return AddEnvVarError::ADD_ENV_VAR_ERROR_UNKNOWN;\n        }\n    }\n\n    if (envVarValueLength_notIncludingNull > 0)\n    {\n        // You could 
statically allocate an array and hope that it's big enough,\n        // but this code instead makes two calls to GetEnvironmentVariableW() and dynamically allocates the exact amount\n\n        // Get the length of the current $PATH\n        constexpr auto* const environmentVariableName = L\"PATH\";\n        DWORD codeUnitCountOfExistingPath_includingTerminatingNull = 0;\n        {\n            constexpr DWORD returnRequiredSize = 0;\n            codeUnitCountOfExistingPath_includingTerminatingNull = GetEnvironmentVariableW(environmentVariableName, nullptr, returnRequiredSize);\n            if (codeUnitCountOfExistingPath_includingTerminatingNull == 0)\n            {\n                const auto errorCode = GetLastError();\n                if (errorCode == ERROR_ENVVAR_NOT_FOUND)\n                    // $PATH doesn't exist\n                    codeUnitCountOfExistingPath_includingTerminatingNull = 1; // 0 + NULL\n                else\n                    return AddEnvVarError::ADD_ENV_VAR_ERROR_UNKNOWN;\n            }\n        }\n        // Allocate enough space for the current $PATH and the extra path to add\n        const auto pathToAdd = std::format(L\";{}{}\", std::wstring_view(envVarBuffer, envVarValueLength_notIncludingNull), extra_string);\n        const auto codeUnitCountRequired_includingTerminatingNull = codeUnitCountOfExistingPath_includingTerminatingNull + pathToAdd.length();\n        std::wstring path((codeUnitCountRequired_includingTerminatingNull - 1), L'\\0');   // std::wstring automatically deals with the terminating NULL\n        // Get the current $PATH\n        {\n            const auto result = GetEnvironmentVariableW(environmentVariableName, path.data(), codeUnitCountRequired_includingTerminatingNull);\n            if (result != 0)\n            {\n                if (result <= path.length())\n                {\n                    const auto codeUnitCountOfExistingPath_notIncludingTerminatingNull = result;\n                    
path.resize(codeUnitCountOfExistingPath_notIncludingTerminatingNull);\n                }\n                else\n                {\n                    // Another process/thread must have changed $PATH to be larger? :/\n                    // const auto codeUnitCountOfExistingPath_includingTerminatingNull = result;\n\n                    return AddEnvVarError::ADD_ENV_VAR_ERROR_UNKNOWN;\n                }\n            }\n            else\n            {\n                // An error happened :(\n                // const auto errorCode = GetLastError();\n\n                return AddEnvVarError::ADD_ENV_VAR_ERROR_UNKNOWN;\n            }\n        }\n\n        // Append the new path\n        path.append(pathToAdd);\n        // Set the updated $PATH\n        if (!SetEnvironmentVariableW(environmentVariableName, path.c_str()))\n        {\n            // An error happened :(\n            // const auto errorCode = GetLastError();\n\n            return AddEnvVarError::ADD_ENV_VAR_ERROR_UNKNOWN;\n        }\n    }\n\n    return AddEnvVarError::ADD_ENV_VAR_ERROR_NONE;\n}\n#endif\n"
  },
  {
    "path": "src/Utils/Utils.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef UTILS_H\n#define UTILS_H\n\n#include \"HostDeviceCommon/Color.h\"\n#include \"Image/Image.h\"\n\n#include <sstream>\n#include <string>\n\nclass Utils\n{\npublic:\n\n    static std::vector<unsigned char> tonemap_hdr_image(const Image32Bit& hdr_image, int sample_number, float gamma, float exposure);\n    static std::vector<unsigned char> tonemap_hdr_image(const std::vector<ColorRGB32F>& hdr_image, int sample_number, float gamma, float exposure);\n    static std::vector<unsigned char> tonemap_hdr_image(const std::vector<float>& hdr_image, int sample_number, float gamma, float exposure);\n    static std::vector<unsigned char> tonemap_hdr_image(const float* hdr_image, size_t size, int sample_number, float gamma, float exposure);\n\n    static void compute_alias_table(const std::vector<float>& input, std::vector<float>& out_probas, std::vector<int>& out_alias, float* out_luminance_total_sum);\n    static void compute_alias_table(const std::vector<float>& input, float in_input_total_sum, std::vector<float>& out_probas, std::vector<int>& out_alias);\n\n    static std::string file_to_string(const char* filepath);\n    static void get_current_date_string(std::stringstream& ss);\n\n    static void* get_volume_handle_for_file(const char* filePath);\n    static bool is_file_on_ssd(const char* file_path);\n    static std::string open_file_dialog(const char* filter_patterns[], int filter_count);\n\n\tstatic float compute_image_mse(const Image32Bit& reference, const Image32Bit& subject);\n\tstatic float compute_image_rmse(const Image32Bit& reference, const Image32Bit& subject);\n    static float compute_image_weighted_median_FLIP(const Image32Bit& reference, const Image32Bit& subject, float** out_error_map);\n\n    static void copy_u8_image_data_to_clipboard(const std::vector<unsigned char>& data, int width, int height);\n    
static void copy_image_to_clipboard(const Image8Bit& image);\n    static void copy_image_to_clipboard(const Image32Bit& image);\n\n    /*\n     * A blend factor of 1 gives only the noisy image. 0 only the denoised image\n     */\n    static Image32Bit OIDN_denoise(const Image32Bit& image, int width, int height, float blend_factor);\n\n    /**\n     * Breaks the debugger when calling this function as if a breakpoint was hit. \n     * Useful to be able to inspect the callstack at a given point in the program\n     */\n    static void debugbreak();\n\n#ifdef _WIN32\n    enum AddEnvVarError\n    {\n\t\tADD_ENV_VAR_ERROR_NONE = 0, // All good\n        ADD_ENV_VAR_ERROR_NOT_FOUND, // Given env var not found\n        ADD_ENV_VAR_ERROR_VALUE_TOO_LONG, // The value of the environment variable exceeds MAX_PATH\n        ADD_ENV_VAR_ERROR_UNKNOWN // Unhandled error value\n    };\n\tstatic AddEnvVarError windows_add_ENV_var_to_PATH(const wchar_t* env_var_name, std::wstring extra_string = L\"\");\n#endif\n};\n\n#endif\n"
  },
  {
    "path": "src/llvm-compile-kernel.h",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#ifndef LLVM_COMPILE_KERNEL_H\n#define LLVM_COMPILE_KERNEL_H\n\n#include <hip/hip_vector_types.h>\n#include <hip/hip_runtime.h>\n#include \"hiprt/impl/Math.h\"\n#include \"hiprt/hiprt_device.h\"\n#include \"hiprt/impl/hiprt_device_impl.h\"\n\n//#include \"Device/kernels/Megakernel.h\"\n//#include \"Device/kernels/ReSTIR/ReGIR/GridFillTemporalReuse.h\"\n#include \"Device/kernels/ReSTIR/ReGIR/SpatialReuse.h\"\n//#include \"Device/kernels/ReSTIR/DI/LightsPresampling.h\"\n//#include \"Device/kernels/ReSTIR/DI/SpatialReuse.h\"\n\n__device__ bool filter_function(const hiprtRay& ray, const void* data, void* payload, const hiprtHit& hit);\n\nHIPRT_DEVICE bool intersectFunc(uint32_t geomType, uint32_t rayType, const hiprtFuncTableHeader& tableHeader, const hiprtRay& ray, void* payload, hiprtHit& hit)\n{\n    const uint32_t index = tableHeader.numGeomTypes * rayType + geomType;\n    [[maybe_unused]] const void* data = tableHeader.funcDataSets[index].intersectFuncData;\n    switch (index)\n    {\n    default: { return false; }\n    }\n}\n\nHIPRT_DEVICE bool filterFunc(uint32_t geomType, uint32_t rayType, const hiprtFuncTableHeader& tableHeader, const hiprtRay& ray, void* payload, const hiprtHit& hit)\n{\n    const uint32_t index = tableHeader.numGeomTypes * rayType + geomType;\n    [[maybe_unused]] const void* data = tableHeader.funcDataSets[index].filterFuncData;\n    switch (index)\n    {\n    case 0: { return filter_function(ray, data, payload, hit); }\n    default: { return false; }\n    }\n}\n\nint main()\n{\n    /*HIPRTRenderData dummy;\n\n    int number_of_blocks;\n    int threads_per_block;\n\n    ReSTIR_GI_SpatialReuse<<<dim3(number_of_blocks), dim3(threads_per_block), 0, hipStreamDefault>>>(dummy);*/\n}\n\n#endif\n"
  },
  {
    "path": "src/main.cpp",
    "content": "/*\n * Copyright 2025 Tom Clabault. GNU GPL3 license.\n * GNU GPL3 license copy: https://www.gnu.org/licenses/gpl-3.0.txt\n */\n\n#include \"Image/Image.h\"\n#include \"Renderer/BVH.h\"\n#include \"Renderer/CPURenderer.h\"\n#include \"Renderer/GPURenderer.h\"\n#include \"Scene/Camera.h\"\n#include \"Scene/SceneParser.h\"\n#include \"Threads/ThreadFunctions.h\"\n#include \"Threads/ThreadManager.h\"\n#include \"UI/RenderWindow.h\"\n#include \"Utils/CommandlineArguments.h\"\n#include \"Utils/Utils.h\"\n\n#include \"stb_image_write.h\"\n\n#include <chrono>\n#include <cmath>\n#include <iostream>\n\nextern ImGuiLogger g_imgui_logger;\n\n#define GPU_RENDER 1\n\nint main(int argc, char* argv[])\n{   \n    CommandlineArguments cmd_arguments = CommandlineArguments::process_command_line_args(argc, argv);\n\n    int width = cmd_arguments.render_width;\n    int height = cmd_arguments.render_height;\n\n    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Reading scene file %s...\", cmd_arguments.scene_file_path.c_str());\n\n    std::chrono::high_resolution_clock::time_point start_scene, start_full;\n    std::chrono::high_resolution_clock::time_point stop_scene, stop_full;\n    Scene parsed_scene;\n    SceneParserOptions options(cmd_arguments.scene_file_path);\n\n    options.override_aspect_ratio = (float)width / height;\n    start_scene = std::chrono::high_resolution_clock::now();\n    start_full = std::chrono::high_resolution_clock::now();\n    Assimp::Importer assimp_importer;\n    SceneParser::parse_scene_file(cmd_arguments.scene_file_path, assimp_importer, parsed_scene, options);\n    stop_scene = std::chrono::high_resolution_clock::now();\n\n    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Scene geometry parsed in %ldms\", std::chrono::duration_cast<std::chrono::milliseconds>(stop_scene - start_scene).count());\n    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Reading envmap %s...\", 
cmd_arguments.skysphere_file_path.c_str());\n\n    // TODO we only need 3 channels for the envmap but the only supported formats are 1, 2, 4 channels in HIP/CUDA, not 3\n    Image32Bit envmap_image;\n    ThreadManager::start_thread(ThreadManager::ENVMAP_LOAD_FROM_DISK_THREAD, ThreadFunctions::read_envmap, std::ref(envmap_image), cmd_arguments.skysphere_file_path, 4, true);\n#if GPU_RENDER\n    std::shared_ptr<HIPRTOrochiCtx> hiprt_orochi_ctx = std::make_shared<HIPRTOrochiCtx>(0);\n\n    RenderWindow render_window(width, height, hiprt_orochi_ctx);\n\n    std::shared_ptr<GPURenderer> renderer = render_window.get_renderer();\n    renderer->set_envmap(envmap_image, cmd_arguments.skysphere_file_path);\n    renderer->set_camera(parsed_scene.camera);\n    renderer->set_scene(parsed_scene);\n\n    // Joining everyone before starting the render except the precompilation threads\n    ThreadManager::join_all_threads({ ThreadManager::GPU_RENDERER_PRECOMPILE_KERNELS_THREAD_KEY, ThreadManager::RENDERER_PRECOMPILE_KERNELS, ThreadManager::RESTIR_DI_PRECOMPILE_KERNELS });\n\n    stop_full = std::chrono::high_resolution_clock::now();\n    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"Full scene parsed & built in %ldms\", std::chrono::duration_cast<std::chrono::milliseconds>(stop_full - start_full).count());\n    renderer->get_hiprt_scene().print_statistics(std::cout);\n\n    // We don't need the scene anymore, we can free it now (freeing the ASSIMP scene data)\n    assimp_importer.FreeScene();\n    // Freeing the renderer's scene data (i.e. 
the data converted from ASSIMP)\n    parsed_scene = Scene();\n    envmap_image.free();\n\n    render_window.run();\n#else\n\n    width = 639*2;\n    height = 346*2;\n\n    g_imgui_logger.add_line(ImGuiLoggerSeverity::IMGUI_LOGGER_INFO, \"[%dx%d]: %d samples ; %d bounces\\n\\n\", width, height, cmd_arguments.render_samples, cmd_arguments.bounces);\n\n    CPURenderer cpu_renderer(width, height);\n    cpu_renderer.get_render_settings().nb_bounces = cmd_arguments.bounces;\n    cpu_renderer.get_render_settings().samples_per_frame = cmd_arguments.render_samples;\n    cpu_renderer.get_render_settings().output_debug_sample_N = cmd_arguments.render_samples - 1;\n    cpu_renderer.set_envmap(envmap_image);\n    cpu_renderer.set_camera(parsed_scene.camera);\n\n    for (float3& pos : parsed_scene.vertices_positions)\n        pos += make_float3(0, 0.1, 0);\n\n    cpu_renderer.set_scene(parsed_scene);\n\n    ThreadManager::join_all_threads();\n\n    stop_full = std::chrono::high_resolution_clock::now();\n    std::cout << \"Full scene & textures parsed in \" << std::chrono::duration_cast<std::chrono::milliseconds>(stop_full - start_full).count() << \"ms\" << std::endl;\n    cpu_renderer.render();\n    cpu_renderer.tonemap(2.2f, 1.8f);\n\n    /*Image32Bit image_denoised_1 = Utils::OIDN_denoise(cpu_renderer.get_framebuffer(), width, height, 1.0f);\n    Image32Bit image_denoised_075 = Utils::OIDN_denoise(cpu_renderer.get_framebuffer(), width, height, 0.75f);\n    Image32Bit image_denoised_05 = Utils::OIDN_denoise(cpu_renderer.get_framebuffer(), width, height, 0.5f);*/\n\n    cpu_renderer.get_framebuffer().write_image_png(\"CPU_RT_output.png\");\n    /*image_denoised_1.write_image_png(\"CPU_RT_output_denoised_1.png\");\n    image_denoised_075.write_image_png(\"CPU_RT_output_denoised_075.png\");\n    image_denoised_05.write_image_png(\"CPU_RT_output_denoised_05.png\");*/\n#endif\n\n    return 0;\n}\n"
  }
]